From e99d06dd149454e1c6eae37ac16bb0a8f9cfb88f Mon Sep 17 00:00:00 2001 From: bmaidics Date: Sat, 15 Jul 2023 00:12:09 +0200 Subject: [PATCH 001/115] Mqtt retained feature (#290) --- .../client.rpt | 51 + .../server.rpt | 50 + .../client.rpt | 55 + .../server.rpt | 54 + .../client.rpt | 52 + .../server.rpt | 50 + .../client.rpt | 52 + .../server.rpt | 49 + .../streams/kafka/publish.retained/client.rpt | 113 ++ .../streams/kafka/publish.retained/server.rpt | 113 ++ .../client.rpt | 235 ++++ .../server.rpt | 246 ++++ .../client.rpt | 206 +++ .../server.rpt | 218 +++ .../client.rpt | 219 +++ .../server.rpt | 223 +++ .../subscribe.filter.change.retain/client.rpt | 204 +++ .../subscribe.filter.change.retain/server.rpt | 217 +++ .../subscribe.multiple.message/client.rpt | 52 + .../subscribe.multiple.message/server.rpt | 75 ++ .../client.rpt | 1 + .../server.rpt | 1 + .../streams/kafka/subscribe.retain/client.rpt | 99 ++ .../streams/kafka/subscribe.retain/server.rpt | 99 ++ .../client.rpt | 64 + .../server.rpt | 60 + .../client.rpt | 37 + .../server.rpt | 39 + .../mqtt/publish.empty.message/client.rpt | 1 - .../mqtt/publish.empty.message/server.rpt | 3 +- .../mqtt/publish.multiple.messages/client.rpt | 3 - .../mqtt/publish.multiple.messages/server.rpt | 5 +- .../mqtt/publish.one.message/client.rpt | 1 - .../mqtt/publish.one.message/server.rpt | 3 +- .../client.rpt | 31 + .../server.rpt | 32 + .../client.rpt | 31 + .../server.rpt | 34 + .../client.rpt | 31 + .../server.rpt | 32 + .../client.rpt | 31 + .../server.rpt | 32 + .../streams/mqtt/publish.retained/client.rpt | 60 + .../streams/mqtt/publish.retained/server.rpt | 60 + .../mqtt/publish.server.sent.abort/server.rpt | 2 +- .../mqtt/publish.server.sent.data/server.rpt | 2 +- .../mqtt/publish.server.sent.flush/server.rpt | 2 +- .../mqtt/publish.server.sent.reset/server.rpt | 2 +- .../client.rpt | 1 - .../server.rpt | 3 +- .../client.rpt | 1 - .../server.rpt | 3 +- .../publish.with.user.property/client.rpt | 1 - 
.../publish.with.user.property/server.rpt | 3 +- .../subscribe.client.sent.abort/client.rpt | 2 +- .../subscribe.client.sent.abort/server.rpt | 2 +- .../subscribe.client.sent.data/client.rpt | 2 +- .../subscribe.client.sent.data/server.rpt | 2 +- .../subscribe.client.sent.reset/client.rpt | 2 +- .../subscribe.client.sent.reset/server.rpt | 2 +- .../client.rpt | 116 ++ .../server.rpt | 124 ++ .../client.rpt | 100 ++ .../server.rpt | 108 ++ .../subscribe.filter.change.retain/client.rpt | 117 ++ .../subscribe.filter.change.retain/server.rpt | 125 ++ .../subscribe.multiple.message/client.rpt | 48 + .../subscribe.multiple.message/server.rpt | 42 + .../mqtt/subscribe.one.message/client.rpt | 2 +- .../mqtt/subscribe.one.message/server.rpt | 2 +- .../subscribe.publish.no.local/client.rpt | 3 +- .../subscribe.publish.no.local/server.rpt | 5 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../subscribe.retain.as.published/client.rpt | 52 + .../subscribe.retain.as.published/server.rpt | 56 + .../streams/mqtt/subscribe.retain/client.rpt | 51 + .../streams/mqtt/subscribe.retain/server.rpt | 55 + .../client.rpt | 30 + .../server.rpt | 31 + .../client.rpt | 30 + .../server.rpt | 31 + .../client.rpt | 30 + .../server.rpt | 31 + .../subscribe.server.sent.abort/client.rpt | 2 +- .../subscribe.server.sent.abort/server.rpt | 2 +- .../subscribe.server.sent.flush/client.rpt | 2 +- .../subscribe.server.sent.flush/server.rpt | 2 +- .../subscribe.server.sent.reset/client.rpt | 2 +- .../subscribe.server.sent.reset/server.rpt | 2 +- .../binding/mqtt/kafka/streams/KafkaIT.java | 108 ++ .../binding/mqtt/kafka/streams/MqttIT.java | 54 + incubator/binding-mqtt-kafka/pom.xml | 2 +- .../internal/MqttKafkaConfiguration.java | 9 +- .../stream/MqttKafkaPublishFactory.java | 538 ++++++-- .../stream/MqttKafkaSubscribeFactory.java | 1200 ++++++++++++++--- .../src/main/zilla/internal.idl | 28 + .../internal/MqttKafkaConfigurationTest.java | 35 + .../stream/MqttKafkaPublishProxyIT.java | 50 + 
.../stream/MqttKafkaSubscribeProxyIT.java | 92 ++ .../binding/mqtt/internal/MqttFunctions.java | 110 +- .../main/resources/META-INF/zilla/mqtt.idl | 4 +- .../application/client.sent.close/client.rpt | 23 + .../application/client.sent.close/server.rpt | 25 + .../client.rpt | 71 + .../server.rpt | 70 + .../publish.empty.message/client.rpt | 1 - .../publish.empty.message/server.rpt | 3 +- .../publish.empty.retained.message/client.rpt | 2 +- .../publish.empty.retained.message/server.rpt | 4 +- .../client.rpt | 1 - .../server.rpt | 3 +- .../client.rpt | 4 - .../server.rpt | 8 +- .../client.rpt | 2 - .../server.rpt | 4 +- .../client.rpt | 3 - .../server.rpt | 5 +- .../client.rpt | 3 - .../server.rpt | 7 +- .../publish.multiple.messages/client.rpt | 3 - .../publish.multiple.messages/server.rpt | 5 +- .../client.rpt | 2 - .../server.rpt | 4 +- .../publish.one.message/client.rpt | 1 - .../publish.one.message/server.rpt | 3 +- .../application/publish.retained/client.rpt | 2 +- .../application/publish.retained/server.rpt | 4 +- .../client.rpt | 1 - .../server.rpt | 3 +- .../client.rpt | 1 - .../server.rpt | 3 +- .../publish.with.user.property/client.rpt | 1 - .../publish.with.user.property/server.rpt | 3 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../subscribe.one.message/client.rpt | 2 +- .../subscribe.publish.no.local/client.rpt | 4 +- .../subscribe.publish.no.local/server.rpt | 6 +- .../client.rpt | 1 - .../server.rpt | 3 +- .../client.rpt | 3 +- .../server.rpt | 5 +- .../subscribe.receive.message/client.rpt | 1 - .../subscribe.receive.message/server.rpt | 3 +- .../client.rpt | 1 - .../server.rpt | 1 - .../client.rpt | 2 +- .../server.rpt | 2 +- .../connect.subscribe.unfragmented/client.rpt | 2 +- .../connect.subscribe.unfragmented/server.rpt | 2 +- .../client.rpt | 58 + .../server.rpt | 58 + .../network/ping.keep.alive/client.rpt | 2 +- .../network/ping.keep.alive/server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- 
.../session.client.takeover/client.rpt | 2 +- .../session.client.takeover/server.rpt | 2 +- .../session.exists.clean.start/client.rpt | 2 +- .../session.exists.clean.start/server.rpt | 2 +- .../network/session.subscribe/client.rpt | 2 +- .../network/session.subscribe/server.rpt | 2 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 59 + .../server.rpt | 61 + .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../network/subscribe.one.message/client.rpt | 2 +- .../network/subscribe.one.message/server.rpt | 2 +- .../subscribe.publish.no.local/client.rpt | 2 +- .../subscribe.publish.no.local/server.rpt | 2 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../subscribe.receive.message/client.rpt | 2 +- .../subscribe.receive.message/server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 6 +- .../client.rpt | 4 +- .../server.rpt | 6 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../unsubscribe.after.subscribe/client.rpt | 2 +- .../unsubscribe.after.subscribe/server.rpt | 2 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../mqtt/internal/MqttFunctionsTest.java | 63 +- 
.../streams/application/ConnectionIT.java | 18 + .../mqtt/streams/application/PublishIT.java | 2 +- .../mqtt/streams/network/ConnectionIT.java | 9 + .../mqtt/streams/network/PublishIT.java | 2 +- .../internal/stream/MqttServerFactory.java | 111 +- .../mqtt/internal/stream/ConnectionIT.java | 16 + .../command/log/internal/LoggableStream.java | 5 +- 237 files changed, 7076 insertions(+), 694 deletions(-) create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt create mode 100644 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt create mode 100644 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/client.rpt create mode 100644 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/server.rpt create mode 100644 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/server.rpt 
create mode 100644 incubator/binding-mqtt-kafka/src/main/zilla/internal.idl create mode 100644 incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt new file mode 100644 index 0000000000..df2ebc41b2 --- /dev/null +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt @@ -0,0 +1,51 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write notify MESSAGES_DONE + +connect await MESSAGES_DONE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_retained") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +read aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt new file mode 100644 index 0000000000..4532e49e0f --- /dev/null +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt @@ -0,0 +1,50 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_retained") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt new file mode 100644 index 0000000000..dd926e3179 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt @@ -0,0 +1,55 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community 
License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + + +write notify MESSAGES_DONE + +connect await MESSAGES_DONE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_retained") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +read zilla:data.empty + +read abort + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt new file mode 100644 index 0000000000..3fca45a21a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt @@ -0,0 +1,54 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_retained") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + +write zilla:data.empty + +write aborted + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt new file mode 100644 index 0000000000..d4deb19b4f --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt @@ -0,0 +1,52 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write notify MESSAGES_DONE + + +connect await MESSAGES_DONE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_retained") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt new file mode 100644 index 0000000000..d9deb564ae --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt @@ -0,0 +1,50 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_retained") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write advise zilla:flush + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt new file mode 100644 index 0000000000..6548ccce19 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt @@ -0,0 +1,52 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write notify MESSAGES_DONE + +connect await MESSAGES_DONE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_retained") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write aborted + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt new file mode 100644 index 0000000000..6a696cbb2a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt @@ -0,0 +1,49 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_retained") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt new file mode 100644 index 0000000000..11f5ae9078 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt @@ -0,0 +1,113 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message3" + +write notify MESSAGE_DELIVERED + +connect await MESSAGE_DELIVERED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_retained") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write advise zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", 
"one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" + +write advise zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt new file mode 100644 index 0000000000..700f0de29a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt @@ -0,0 +1,113 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message3" + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_retained") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + +read advised zilla:flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +read advised zilla:flush diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt new file mode 100644 index 0000000000..bab0d0afc7 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt @@ -0,0 +1,235 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + + +connected + + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .build() + .build()} + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("three") + .build() + .build() + .build() + .build()} + +write notify RETAIN_FINISHED + +read zilla:data.ext ${kafka:matchDataEx() + 
.typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message3" + +write notify MESSAGES_FINISHED + +connect await MESSAGES_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("three") + .build() + .build() + .build() + .build()} + +write notify SECOND_FLUSH_SENT + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(2) + .partition(0, 1, 2) + .progress(0, 
2) + .progress(1, 1) + .key("sensor/three") + .header("zilla:topic", "sensor") + .header("zilla:topic", "three") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +read advised zilla:flush + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt new file mode 100644 index 0000000000..b1ded1a193 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt @@ -0,0 +1,246 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .build() + .build()} + + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("three") + .build() + .build() + .build() + 
.build()} + +read await RETAIN_FINISHED + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message3" +write flush + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +write notify FIRST_RETAINED_SENT + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("three") + .build() + .build() + .build() + .build()} +read await SECOND_FLUSH_SENT + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush + +write zilla:data.ext 
${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(2) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/three") + .header("zilla:topic", "sensor") + .header("zilla:topic", "three") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +write advise zilla:flush + +read closed +write close + +write notify RETAIN_FINISHED diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt new file mode 100644 index 0000000000..0a11769047 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt @@ -0,0 +1,206 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + + +connected + + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .build() + .build()} +write notify RETAIN_STARTED +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message3" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(2) + .partition(0, 1, 2) + .progress(0, 2) + 
.progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +write notify BUFFERED_MESSAGES_ARRIVED + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message4" + +write notify MESSAGES_FINISHED + +connect await MESSAGES_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +read advised zilla:flush + +write close +read closed diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt new file mode 100644 index 0000000000..3bc279f7f4 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt @@ -0,0 +1,218 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .build() + .build()} + + +read await RETAIN_STARTED +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message3" +write flush + +write 
zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(2) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush + +read await BUFFERED_MESSAGES_ARRIVED +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message4" +write flush + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write notify RETAIN_STARTED + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush + +write advise zilla:flush 
+ +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt new file mode 100644 index 0000000000..d2a71faf71 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt @@ -0,0 +1,219 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +read advised zilla:flush + +write close +read closed + +write notify RETAINED_FINISHED + +connect await RETAINED_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + + +connected + + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .build() + .build()} + +read zilla:data.ext 
${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .build() + .build()} + +write notify RETAIN_FINISHED + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message3" +write notify MESSAGES_FINISHED + +connect await MESSAGES_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +read advised zilla:flush + +write close +read closed diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt new file mode 100644 index 0000000000..8d43da6ad9 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt @@ -0,0 +1,223 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +write advise zilla:flush + +read closed +write close + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .build() + .build()} + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) 
+ .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .build() + .build()} + +read await RETAIN_FINISHED +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message3" +write flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +write advise zilla:flush + +read closed +write close + +write notify RETAIN_FINISHED diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt new file mode 100644 index 0000000000..92859cd311 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt @@ -0,0 +1,204 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + + +connected + + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .build() + .build()} + +write notify MESSAGES_FINISHED + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message3" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(2) + .partition(0, 1, 2) + .progress(0, 2) + 
.progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message4" + +connect await MESSAGES_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +read advised zilla:flush + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt new file mode 100644 index 0000000000..b2f8912a8d --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt @@ -0,0 +1,217 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + 
.header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + .build() + .build() + .build() + .build()} + + +read await RETAIN_FINISHED +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message3" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(2) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message4" +write flush + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("two") + 
.build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/two") + .header("zilla:topic", "sensor") + .header("zilla:topic", "two") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush + +write advise zilla:flush + +read closed +write close + +write notify RETAIN_FINISHED diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt new file mode 100644 index 0000000000..b865d030ac --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt @@ -0,0 +1,52 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt new file mode 100644 index 0000000000..76dac38228 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt @@ -0,0 +1,75 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt index 4751574dcd..a99b18fde2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt @@ -52,6 +52,7 @@ write advise zilla:flush 
${kafka:flushEx() .skip(1) .sequence(1) .build() + .headerNot("zilla:local", "client") .build() .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt index a3bbe5e066..d1e3e9dcf2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt @@ -54,6 +54,7 @@ read advised zilla:flush ${kafka:flushEx() .skip(1) .sequence(1) .build() + .headerNot("zilla:local", "client") .build() .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt new file mode 100644 index 0000000000..d58a20ae17 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt @@ -0,0 +1,99 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +read advised zilla:flush + +write close +read closed + +write notify RETAINED_FINISHED + +connect await RETAINED_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt new file mode 100644 index 0000000000..e11b2bc16a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt @@ -0,0 +1,99 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +write advise zilla:flush + +read closed +write close + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + 
.headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt new file mode 100644 index 0000000000..3634d3a018 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt @@ -0,0 +1,64 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read aborted + +write notify RETAINED_FINISHED + +connect await RETAINED_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read aborted + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt new file mode 100644 index 0000000000..5029a11921 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt @@ -0,0 +1,60 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write abort + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt new file mode 100644 index 0000000000..a78a268f35 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt @@ -0,0 +1,37 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt new file mode 100644 index 0000000000..3ee4685f5d --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt @@ -0,0 +1,39 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_retained") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/client.rpt index aefebf981a..3e8a298f5f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/client.rpt @@ -30,7 +30,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} write zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/server.rpt index 2ab73c22f8..4921b6f355 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/server.rpt @@ -20,7 +20,7 @@ accept "zilla://streams/mqtt0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() 
.typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -33,6 +33,5 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/client.rpt index f6814f1033..e53c7280ef 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/client.rpt @@ -30,7 +30,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .format("TEXT") .build() .build()} @@ -41,7 +40,6 @@ write flush write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .format("TEXT") .build() .build()} @@ -52,7 +50,6 @@ write flush write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .format("TEXT") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/server.rpt index bb7207c9b7..3d6a18ceef 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/server.rpt @@ -19,7 +19,7 @@ accept 
"zilla://streams/mqtt0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -32,7 +32,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .format("TEXT") .build() .build()} @@ -42,7 +41,6 @@ read "message1" read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .format("TEXT") .build() .build()} @@ -52,7 +50,6 @@ read "message2" read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .format("TEXT") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/client.rpt index 32e92654da..643c9a0e10 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/client.rpt @@ -30,7 +30,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .qos("AT_MOST_ONCE") .expiryInterval(15) .contentType("message") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/server.rpt index 44b70e87bd..afa02348b8 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/server.rpt @@ -20,7 +20,7 @@ accept "zilla://streams/mqtt0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -33,7 +33,6 @@ connected read zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .qos("AT_MOST_ONCE") .expiryInterval(15) .contentType("message") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/client.rpt new file mode 100644 index 0000000000..305f243c8d --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/client.rpt @@ -0,0 +1,31 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() + .build()} + +connected + +read aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/server.rpt new file mode 100644 index 0000000000..1ffe3f4ef5 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/server.rpt @@ -0,0 +1,32 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() + .build()} + +connected + +write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/client.rpt new file mode 100644 index 0000000000..0f0e0f2879 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/client.rpt @@ -0,0 +1,31 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() + .build()} + +connected + +read aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/server.rpt new file mode 100644 index 0000000000..25c52a4dac --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/server.rpt @@ -0,0 +1,34 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() + .build()} + +connected + +write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/client.rpt new file mode 100644 index 0000000000..e62fd29994 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/client.rpt @@ -0,0 +1,31 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() + .build()} + +connected + +read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/server.rpt new file mode 100644 index 0000000000..0d654c898a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/server.rpt @@ -0,0 +1,32 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() + .build()} + +connected + +write advise zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/client.rpt new file mode 100644 index 0000000000..734b829363 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/client.rpt @@ -0,0 +1,31 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() + .build()} + +connected + +write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/server.rpt new file mode 100644 index 0000000000..bd93328a97 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/server.rpt @@ -0,0 +1,32 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() + .build()} + +connected + +read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/client.rpt new file mode 100644 index 0000000000..2efb496b7c --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/client.rpt @@ -0,0 +1,60 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .flags("RETAIN") + .format("TEXT") + .build() + .build()} + +write "message2" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +write "message3" +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/server.rpt new file mode 100644 index 0000000000..42e19c0983 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/server.rpt @@ -0,0 +1,60 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .flags("RETAIN") + .format("TEXT") + .build() + .build()} + +read "message2" + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +read "message3" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/server.rpt index 47cd3c75b2..279dae2053 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/server.rpt @@ -18,7 +18,7 @@ accept "zilla://streams/mqtt0" option zilla:transmission "duplex" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/server.rpt index 3144b8ec89..c13d731570 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/server.rpt @@ -20,7 +20,7 @@ accept "zilla://streams/mqtt0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/server.rpt index d1f2b4a10b..4fae513f0c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/server.rpt @@ -18,7 +18,7 @@ accept "zilla://streams/mqtt0" option zilla:transmission "duplex" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/server.rpt index 971687400a..6940fdf073 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/server.rpt @@ -18,7 +18,7 @@ accept "zilla://streams/mqtt0" option zilla:transmission "duplex" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/client.rpt index a822ca4c87..b6c6f8a8ea 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/client.rpt @@ -29,7 +29,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensors/1") .format("TEXT") .userProperty("row1", "1") .userProperty("row2", "2") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/server.rpt index bdbd6d99bd..2d27454157 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/server.rpt @@ -19,7 +19,7 @@ accept "zilla://streams/mqtt0" 
accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") @@ -31,7 +31,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensors/1") .format("TEXT") .userProperty("row1", "1") .userProperty("row2", "2") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/client.rpt index 4d2aee3178..00078e021e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/client.rpt @@ -29,7 +29,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensors/1") .format("TEXT") .userProperty("row1", "1") .userProperty("row1", "2") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/server.rpt index f433deb0ea..8fbf71f2cd 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/server.rpt @@ -19,7 +19,7 @@ accept "zilla://streams/mqtt0" 
accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") @@ -31,7 +31,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensors/1") .format("TEXT") .userProperty("row1", "1") .userProperty("row1", "2") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/client.rpt index 7d068f7ed9..d2837a872e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/client.rpt @@ -29,7 +29,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensors/1") .format("TEXT") .userProperty("row", "1") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/server.rpt index d6f0e84f86..0ec620300b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/server.rpt @@ -19,7 +19,7 @@ accept "zilla://streams/mqtt0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() 
.typeId(zilla:id("mqtt")) .publish() .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") @@ -31,7 +31,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensors/1") .format("TEXT") .userProperty("row", "1") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/client.rpt index d5055b5edf..4ebd9d47b4 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/server.rpt index c9203bad55..d87d2aac1f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/server.rpt @@ -22,7 +22,7 @@ read zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, 
"AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/client.rpt index 982a36477f..e4845f0b19 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/server.rpt index 51f8ee0ecb..036048878e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/client.rpt index 229c326ab4..ec964b8a62 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/server.rpt index 0579bfee55..0877af4fe0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/server.rpt @@ -22,7 +22,7 @@ read zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/client.rpt new file mode 100644 index 0000000000..d80d42f600 --- 
/dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/client.rpt @@ -0,0 +1,116 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message2" + +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1, "AT_MOST_ONCE") + .filter("sensor/two", 2, "AT_MOST_ONCE", "SEND_RETAINED", "RETAIN_AS_PUBLISHED") + .build() + .build()} + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .flags("RETAIN") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +read "message" + +read await FIRST_RETAINED_SENT +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + 
.subscribe() + .filter("sensor/one", 1, "AT_MOST_ONCE") + .filter("sensor/two", 2, "AT_MOST_ONCE", "SEND_RETAINED", "RETAIN_AS_PUBLISHED") + .filter("sensor/three", 3, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} +write notify SECOND_FLUSH_SENT + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .flags("RETAIN") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +read "message2" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/three") + .subscriptionId(3) + .format("TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message3" + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/server.rpt new file mode 100644 index 0000000000..3c9d2bc5f6 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/server.rpt @@ -0,0 +1,124 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message2" +write flush + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1, "AT_MOST_ONCE") + .filter("sensor/two", 2, "AT_MOST_ONCE", "SEND_RETAINED", "RETAIN_AS_PUBLISHED") + .build() + .build()} + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .flags("RETAIN") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +write "message" +write flush + +write notify FIRST_RETAINED_SENT + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1, "AT_MOST_ONCE") + .filter("sensor/two", 2, "AT_MOST_ONCE", "SEND_RETAINED", "RETAIN_AS_PUBLISHED") + .filter("sensor/three", 3, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .flags("RETAIN") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +write "message2" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/three") + .subscriptionId(3) + .format("TEXT") + .build() + .build()} + +write 
"message" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message3" +write flush + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/client.rpt new file mode 100644 index 0000000000..5fca4ecb70 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/client.rpt @@ -0,0 +1,100 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE") + .filter("sensor/two", 2, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message" + +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1, "AT_MOST_ONCE") + .build() + .build()} + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message2" + +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1, "AT_MOST_ONCE") + .filter("sensor/two", 2, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message3" + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/server.rpt new file mode 100644 index 0000000000..f0ef61f05c --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/server.rpt @@ -0,0 +1,108 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE") + .filter("sensor/two", 2, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message" +write flush + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1, "AT_MOST_ONCE") + .build() + .build()} + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + 
.topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message2" +write flush + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1, "AT_MOST_ONCE") + .filter("sensor/two", 2, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .flags("RETAIN") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message3" +write flush + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/client.rpt new file mode 100644 index 0000000000..d8a5fd2bee --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/client.rpt @@ -0,0 +1,117 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message2" + +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1, "AT_MOST_ONCE") + .filter("sensor/two", 2, "AT_MOST_ONCE", "SEND_RETAINED", "RETAIN_AS_PUBLISHED") + .build() + .build()} + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .flags("RETAIN") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .flags("RETAIN") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +read "message2" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message3" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +read "message2" + +write notify BUFFERED_MESSAGES_ARRIVED + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message4" diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/server.rpt new file mode 100644 index 0000000000..3480ca9925 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/server.rpt @@ -0,0 +1,125 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message2" +write flush + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1, "AT_MOST_ONCE") + .filter("sensor/two", 2, "AT_MOST_ONCE", "SEND_RETAINED", "RETAIN_AS_PUBLISHED") + .build() + .build()} + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .flags("RETAIN") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .flags("RETAIN") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +write "message2" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message3" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} + +write "message2" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message4" + + diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/client.rpt new file mode 100644 index 0000000000..f9f1557cc6 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/client.rpt @@ -0,0 +1,48 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} +read "message" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} +read "message2" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/server.rpt new file mode 100644 index 0000000000..809d879c90 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/server.rpt @@ -0,0 +1,42 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message" +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt index 44776a4901..44d1399684 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/server.rpt index 7663c47042..809d879c90 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/server.rpt @@ -23,7 +23,7 @@ 
read zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/client.rpt index 7255e543f2..2cc96bc157 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL") .build() .build()} @@ -58,7 +58,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .format("TEXT") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/server.rpt index 05531897d5..a39048ad56 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${mqtt:beginEx() 
.typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL") .build() .build()} connected @@ -40,7 +40,7 @@ write "message2" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -52,7 +52,6 @@ connected read zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .format("TEXT") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/client.rpt index 85e4b4534f..d52dffc7e9 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/client.rpt @@ -31,7 +31,7 @@ write advise zilla:flush ${mqtt:flushEx() .typeId(zilla:id("mqtt")) .subscribe() .filter("sensor/+/#", 1) - .filter("sensor/+/1", 2) + .filter("sensor/+/1", 2, "AT_MOST_ONCE", "NO_LOCAL") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/server.rpt index 0cca50e6eb..c686a815c8 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/server.rpt @@ -33,7 +33,7 @@ read advised zilla:flush ${mqtt:flushEx() .typeId(zilla:id("mqtt")) .subscribe() .filter("sensor/+/#", 1) - .filter("sensor/+/1", 2) + .filter("sensor/+/1", 2, "AT_MOST_ONCE", "NO_LOCAL") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/client.rpt new file mode 100644 index 0000000000..a7816a5a76 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/client.rpt @@ -0,0 +1,52 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED", "RETAIN_AS_PUBLISHED") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .flags("RETAIN") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message2" + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/server.rpt new file mode 100644 index 0000000000..723f81abaa --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/server.rpt @@ -0,0 +1,56 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED", "RETAIN_AS_PUBLISHED") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .flags("RETAIN") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message2" +write flush + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/client.rpt new file mode 100644 index 0000000000..0d766258ca --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/client.rpt @@ -0,0 +1,51 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message2" + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/server.rpt new file mode 100644 index 0000000000..e3541ceb78 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/server.rpt @@ -0,0 +1,55 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message2" +write flush + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/client.rpt new file mode 100644 index 0000000000..ac974f4d57 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/client.rpt @@ -0,0 +1,30 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +connected + +read aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/server.rpt new file mode 100644 index 0000000000..3f6d70884b --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/server.rpt @@ -0,0 +1,31 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +connected + +write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/client.rpt new file mode 100644 index 0000000000..406f1f8f10 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/client.rpt @@ -0,0 +1,30 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +connected + +read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/server.rpt new file mode 100644 index 0000000000..44de50ed01 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/server.rpt @@ -0,0 +1,31 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +connected + +write advise zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/client.rpt new file mode 100644 index 0000000000..900d02e518 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/client.rpt @@ -0,0 +1,30 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +connected + +write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/server.rpt new file mode 100644 index 0000000000..12b2ba97a2 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/server.rpt @@ -0,0 +1,31 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build() + .build()} + +connected + +read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/client.rpt index ac974f4d57..3bea6e1ab7 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/server.rpt index 3f6d70884b..fcc9e8f07c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/server.rpt @@ -22,7 +22,7 @@ read zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 
1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/client.rpt index 406f1f8f10..0b10daa322 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/server.rpt index 44de50ed01..1bfa319425 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/server.rpt @@ -22,7 +22,7 @@ read zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/client.rpt index 900d02e518..85187364fb 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/server.rpt index 12b2ba97a2..773cbf8ff5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/server.rpt @@ -22,7 +22,7 @@ read zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java 
index ea3d9100b5..0d54dbca57 100644 --- a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java +++ b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java @@ -89,6 +89,42 @@ public void shouldPublishAbortWhenServerSentData() throws Exception k3po.finish(); } + @Test + @Specification({ + "${kafka}/publish.retained.server.sent.abort/client", + "${kafka}/publish.retained.server.sent.abort/server"}) + public void shouldPublishReceiveServerSentRetainedAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/publish.retained.server.sent.flush/client", + "${kafka}/publish.retained.server.sent.flush/server"}) + public void shouldPublishReceiveServerSentRetainedFlush() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/publish.retained.server.sent.reset/client", + "${kafka}/publish.retained.server.sent.reset/server"}) + public void shouldPublishReceiveServerSentRetainedReset() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/publish.retained.server.sent.data/client", + "${kafka}/publish.retained.server.sent.data/server"}) + public void shouldPublishAbortWhenServerSentRetainedData() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${kafka}/publish.empty.message/client", @@ -107,6 +143,15 @@ public void shouldSendOneMessage() throws Exception k3po.finish(); } + @Test + @Specification({ + "${kafka}/publish.retained/client", + "${kafka}/publish.retained/server"}) + public void shouldPublishRetainedMessage() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${kafka}/publish.multiple.messages/client", @@ -197,6 +242,60 @@ public void shouldSubscribeReceiveServerSentReset() throws Exception k3po.finish(); } + @Test + @Specification({ + "${kafka}/subscribe.retained.server.sent.abort/client", + 
"${kafka}/subscribe.retained.server.sent.abort/server"}) + public void shouldSubscribeReceiveServerSentRetainedAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/subscribe.retained.server.sent.reset/client", + "${kafka}/subscribe.retained.server.sent.reset/server"}) + public void shouldSubscribeReceiveServerSentRetainedReset() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/subscribe.filter.change.retain/client", + "${kafka}/subscribe.filter.change.retain/server"}) + public void shouldReceiveRetainedAfterFilterChange() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/subscribe.deferred.filter.change.retain/client", + "${kafka}/subscribe.deferred.filter.change.retain/server"}) + public void shouldReceiveRetainedAfterFilterDeferred() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/subscribe.filter.change.retain.buffer/client", + "${kafka}/subscribe.filter.change.retain.buffer/server"}) + public void shouldReceiveRetainedAfterFilterChangeBuffer() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/subscribe.filter.change.retain.resubscribe/client", + "${kafka}/subscribe.filter.change.retain.resubscribe/server"}) + public void shouldReceiveRetainedAfterFilterChangeResubscribe() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${kafka}/subscribe.one.message/client", @@ -233,6 +332,15 @@ public void shouldNotReceiveLocal() throws Exception k3po.finish(); } + @Test + @Specification({ + "${kafka}/subscribe.retain/client", + "${kafka}/subscribe.retain/server"}) + public void shouldReceiveRetained() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${kafka}/subscribe.receive.message.wildcard/client", diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java 
b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java index 9fd5a92980..034760c69d 100644 --- a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java +++ b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java @@ -116,6 +116,15 @@ public void shouldSendMultipleMessages() throws Exception k3po.finish(); } + @Test + @Specification({ + "${mqtt}/publish.retained/client", + "${mqtt}/publish.retained/server"}) + public void shouldPublishRetainedMessage() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${mqtt}/publish.with.user.properties.distinct/client", @@ -215,6 +224,51 @@ public void shouldReceiveCorrelationData() throws Exception k3po.finish(); } + @Test + @Specification({ + "${mqtt}/subscribe.retain/client", + "${mqtt}/subscribe.retain/server"}) + public void shouldReceiveRetainedNoRetainAsPublished() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/subscribe.retain.as.published/client", + "${mqtt}/subscribe.retain.as.published/server"}) + public void shouldReceiveRetainAsPublished() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/subscribe.filter.change.retain/client", + "${mqtt}/subscribe.filter.change.retain/server"}) + public void shouldReceiveRetainedAfterFilterChange() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/subscribe.deferred.filter.change.retain/client", + "${mqtt}/subscribe.deferred.filter.change.retain/server"}) + public void shouldReceiveRetainedAfterFilterChangeDeferred() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/subscribe.filter.change.retain.resubscribe/client", + "${mqtt}/subscribe.filter.change.retain.resubscribe/server"}) + public void shouldReceiveRetainedAfterFilterChangeResubscribe() throws Exception + { + 
k3po.finish(); + } + @Test @Specification({ "${mqtt}/subscribe.publish.no.local/client", diff --git a/incubator/binding-mqtt-kafka/pom.xml b/incubator/binding-mqtt-kafka/pom.xml index f760a3c007..317e1603b2 100644 --- a/incubator/binding-mqtt-kafka/pom.xml +++ b/incubator/binding-mqtt-kafka/pom.xml @@ -108,7 +108,7 @@ flyweight-maven-plugin ${project.version} - core mqtt kafka + core mqtt kafka internal io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java index 256e54bdbc..72030e5d0c 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java @@ -21,11 +21,13 @@ public class MqttKafkaConfiguration extends Configuration private static final ConfigurationDef MQTT_KAFKA_CONFIG; public static final PropertyDef KAFKA_MESSAGES_TOPIC; + public static final PropertyDef KAFKA_RETAINED_MESSAGES_TOPIC; static { final ConfigurationDef config = new ConfigurationDef("zilla.binding.mqtt.kafka"); KAFKA_MESSAGES_TOPIC = config.property("messages.topic", "mqtt_messages"); + KAFKA_RETAINED_MESSAGES_TOPIC = config.property("retained.messages.topic", "mqtt_retained"); MQTT_KAFKA_CONFIG = config; } @@ -35,8 +37,13 @@ public MqttKafkaConfiguration( super(MQTT_KAFKA_CONFIG, config); } - public String kafkaMessagesTopic() + public String messagesTopic() { return KAFKA_MESSAGES_TOPIC.get(this); } + + public String retainedMessagesTopic() + { + return KAFKA_RETAINED_MESSAGES_TOPIC.get(this); + } } diff --git 
a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java index 665433e21e..6027dd0c31 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java @@ -36,6 +36,7 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaKeyFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPayloadFormat; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPayloadFormatFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPublishFlags; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.AbortFW; @@ -46,6 +47,7 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.FlushFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttPublishBeginExFW; @@ -55,12 +57,10 @@ import io.aklivity.zilla.runtime.engine.EngineContext; import io.aklivity.zilla.runtime.engine.binding.BindingHandler; import 
io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; -import io.aklivity.zilla.runtime.engine.concurrent.Signaler; public class MqttKafkaPublishFactory implements BindingHandler { - //TODO: these defaults should come from the binding config - private static final String KAFKA_MESSAGES_TOPIC_NAME = "mqtt_messages"; + private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0); private static final KafkaAckMode KAFKA_DEFAULT_ACK_MODE = KafkaAckMode.LEADER_ONLY; private static final String MQTT_TYPE_NAME = "mqtt"; private static final String KAFKA_TYPE_NAME = "kafka"; @@ -92,12 +92,11 @@ public class MqttKafkaPublishFactory implements BindingHandler private final MqttDataExFW.Builder mqttDataExRW = new MqttDataExFW.Builder(); private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); + private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder(); private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); private final Array32FW.Builder kafkaHeadersRW = new Array32FW.Builder<>(new KafkaHeaderFW.Builder(), new KafkaHeaderFW()); private final MutableDirectBuffer writeBuffer; - private final MutableDirectBuffer keyBuffer; - private final MutableDirectBuffer headerIntValueBuffer; private final MutableDirectBuffer extBuffer; private final MutableDirectBuffer kafkaHeadersBuffer; private final BindingHandler streamFactory; @@ -106,14 +105,12 @@ public class MqttKafkaPublishFactory implements BindingHandler private final MqttKafkaHeaderHelper helper; private final int mqttTypeId; private final int kafkaTypeId; - private final Signaler signaler; private final LongFunction supplyBinding; - private KafkaKeyFW key; - - private OctetsFW[] topicNameHeaders; - private OctetsFW clientIdOctets; - private String16FW binaryFormat; - private String16FW textFormat; + private final String16FW binaryFormat; + private final String16FW textFormat; + private final 
String16FW kafkaTopic; + private final String16FW kafkaRetainedTopic; + private final int bufferCapacity; public MqttKafkaPublishFactory( MqttKafkaConfiguration config, @@ -122,19 +119,19 @@ public MqttKafkaPublishFactory( { this.mqttTypeId = context.supplyTypeId(MQTT_TYPE_NAME); this.kafkaTypeId = context.supplyTypeId(KAFKA_TYPE_NAME); - this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); - this.keyBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); - this.headerIntValueBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); - this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); - this.kafkaHeadersBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.bufferCapacity = context.writeBuffer().capacity(); + this.writeBuffer = new UnsafeBuffer(new byte[bufferCapacity]); + this.extBuffer = new UnsafeBuffer(new byte[bufferCapacity]); + this.kafkaHeadersBuffer = new UnsafeBuffer(new byte[bufferCapacity]); this.helper = new MqttKafkaHeaderHelper(); - this.signaler = context.signaler(); this.streamFactory = context.streamFactory(); this.supplyInitialId = context::supplyInitialId; this.supplyReplyId = context::supplyReplyId; this.supplyBinding = supplyBinding; this.binaryFormat = new String16FW(MqttPayloadFormat.BINARY.name()); this.textFormat = new String16FW(MqttPayloadFormat.TEXT.name()); + this.kafkaTopic = new String16FW(config.messagesTopic()); + this.kafkaRetainedTopic = new String16FW(config.retainedMessagesTopic()); } @Override @@ -151,30 +148,6 @@ public MessageConsumer newStream( final long initialId = begin.streamId(); final long authorization = begin.authorization(); - final OctetsFW extension = begin.extension(); - final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); - - assert mqttBeginEx.kind() == MqttBeginExFW.KIND_PUBLISH; - final MqttPublishBeginExFW mqttPublishBeginEx = mqttBeginEx.publish(); - String topicName = 
mqttPublishBeginEx.topic().asString(); - assert topicName != null; - - String[] topicHeaders = topicName.split("/"); - topicNameHeaders = new OctetsFW[topicHeaders.length]; - for (int i = 0; i < topicHeaders.length; i++) - { - String16FW topicHeader = new String16FW(topicHeaders[i]); - topicNameHeaders[i] = new OctetsFW().wrap(topicHeader.value(), 0, topicHeader.length()); - } - clientIdOctets = new OctetsFW() - .wrap(mqttPublishBeginEx.clientId().value(), 0, mqttPublishBeginEx.clientId().length()); - final DirectBuffer topicNameBuffer = mqttPublishBeginEx.topic().value(); - key = new KafkaKeyFW.Builder() - .wrap(keyBuffer, 0, keyBuffer.capacity()) - .length(topicNameBuffer.capacity()) - .value(topicNameBuffer, 0, topicNameBuffer.capacity()) - .build(); - final MqttKafkaBindingConfig binding = supplyBinding.apply(routedId); final MqttKafkaRouteConfig resolved = binding != null ? binding.resolve(authorization) : null; @@ -198,7 +171,8 @@ private final class MqttPublishProxy private final long routedId; private final long initialId; private final long replyId; - private final KafkaProxy delegate; + private final KafkaMessagesProxy messages; + private final KafkaRetainedProxy retained; private int state; @@ -211,6 +185,13 @@ private final class MqttPublishProxy private int replyMax; private int replyPad; + private KafkaKeyFW key; + + private OctetsFW[] topicNameHeaders; + private OctetsFW clientIdOctets; + private boolean retainAvailable; + + private MqttPublishProxy( MessageConsumer mqtt, long originId, @@ -223,7 +204,8 @@ private MqttPublishProxy( this.routedId = routedId; this.initialId = initialId; this.replyId = supplyReplyId.applyAsLong(initialId); - this.delegate = new KafkaProxy(originId, resolvedId, this); + this.messages = new KafkaMessagesProxy(originId, resolvedId, this); + this.retained = new KafkaRetainedProxy(originId, resolvedId, this); } private void onMqttMessage( @@ -280,7 +262,38 @@ private void onMqttBegin( assert initialAck <= initialSeq; - 
delegate.doKafkaBegin(traceId, authorization, affinity); + final OctetsFW extension = begin.extension(); + final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); + + assert mqttBeginEx.kind() == MqttBeginExFW.KIND_PUBLISH; + final MqttPublishBeginExFW mqttPublishBeginEx = mqttBeginEx.publish(); + String topicName = mqttPublishBeginEx.topic().asString(); + assert topicName != null; + + String[] topicHeaders = topicName.split("/"); + topicNameHeaders = new OctetsFW[topicHeaders.length]; + for (int i = 0; i < topicHeaders.length; i++) + { + String16FW topicHeader = new String16FW(topicHeaders[i]); + topicNameHeaders[i] = new OctetsFW().wrap(topicHeader.value(), 0, topicHeader.length()); + } + clientIdOctets = new OctetsFW() + .wrap(mqttPublishBeginEx.clientId().value(), 0, mqttPublishBeginEx.clientId().length()); + final DirectBuffer topicNameBuffer = mqttPublishBeginEx.topic().value(); + + final MutableDirectBuffer keyBuffer = new UnsafeBuffer(new byte[topicNameBuffer.capacity() + 4]); + key = new KafkaKeyFW.Builder() + .wrap(keyBuffer, 0, keyBuffer.capacity()) + .length(topicNameBuffer.capacity()) + .value(topicNameBuffer, 0, topicNameBuffer.capacity()) + .build(); + + messages.doKafkaBegin(traceId, authorization, affinity); + this.retainAvailable = (mqttPublishBeginEx.flags() & 1 << MqttPublishFlags.RETAIN.value()) != 0; + if (retainAvailable) + { + retained.doKafkaBegin(traceId, authorization, affinity); + } } private void onMqttData( @@ -323,7 +336,15 @@ private void onMqttData( if (mqttPublishDataEx.expiryInterval() != -1) { - addHeader(helper.kafkaTimeoutHeaderName, mqttPublishDataEx.expiryInterval() * 1000); + final MutableDirectBuffer expiryBuffer = new UnsafeBuffer(new byte[4]); + expiryBuffer.putInt(0, mqttPublishDataEx.expiryInterval() * 1000, ByteOrder.BIG_ENDIAN); + kafkaHeadersRW.item(h -> + { + h.nameLen(helper.kafkaTimeoutHeaderName.sizeof()); + h.name(helper.kafkaTimeoutHeaderName); + h.valueLen(4); + h.value(expiryBuffer, 0, 
expiryBuffer.capacity()); + }); } if (mqttPublishDataEx.contentType().asString() != null) @@ -361,10 +382,26 @@ private void onMqttData( .headers(kafkaHeadersRW.build())) .build(); - //TODO: do this onMqttData for subscribe - // doMqttReset(traceId); - // delegate.doKafkaAbort(traceId, authorization); - delegate.doKafkaData(traceId, authorization, budgetId, reserved, flags, payload, kafkaDataEx); + messages.doKafkaData(traceId, authorization, budgetId, reserved, flags, payload, kafkaDataEx); + + if (retainAvailable) + { + if ((mqttPublishDataEx.flags() & 1 << MqttPublishFlags.RETAIN.value()) != 0) + { + retained.doKafkaData(traceId, authorization, budgetId, reserved, flags, payload, kafkaDataEx); + } + else + { + final KafkaFlushExFW kafkaFlushEx = + kafkaFlushExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.partition(p -> p.partitionId(-1).partitionOffset(-1)) + .capabilities(c -> c.set(KafkaCapabilities.PRODUCE_ONLY)) + .key(key)) + .build(); + retained.doKafkaFlush(traceId, authorization, budgetId, reserved, kafkaFlushEx); + } + } } @@ -384,7 +421,11 @@ private void onMqttEnd( assert initialAck <= initialSeq; - delegate.doKafkaEnd(traceId, initialSeq, authorization); + messages.doKafkaEnd(traceId, initialSeq, authorization); + if (retainAvailable) + { + retained.doKafkaEnd(traceId, initialSeq, authorization); + } } private void onMqttAbort( @@ -403,7 +444,11 @@ private void onMqttAbort( assert initialAck <= initialSeq; - delegate.doKafkaAbort(traceId, authorization); + messages.doKafkaAbort(traceId, authorization); + if (retainAvailable) + { + retained.doKafkaAbort(traceId, authorization); + } } private void onMqttReset( @@ -425,7 +470,11 @@ private void onMqttReset( assert replyAck <= replySeq; - delegate.doKafkaReset(traceId); + messages.doKafkaReset(traceId); + if (retainAvailable) + { + retained.doKafkaReset(traceId); + } } private void onMqttWindow( @@ -452,7 +501,11 @@ private void onMqttWindow( assert replyAck <= 
replySeq; - delegate.doKafkaWindow(traceId, authorization, budgetId, padding, capabilities); + messages.doKafkaWindow(traceId, authorization, budgetId, padding, capabilities); + if (retainAvailable) + { + retained.doKafkaWindow(traceId, authorization, budgetId, padding, capabilities); + } } private void doMqttBegin( @@ -460,9 +513,9 @@ private void doMqttBegin( long authorization, long affinity) { - replySeq = delegate.replySeq; - replyAck = delegate.replyAck; - replyMax = delegate.replyMax; + replySeq = messages.replySeq; + replyAck = messages.replyAck; + replyMax = messages.replyMax; state = MqttKafkaState.openingReply(state); doBegin(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, @@ -492,9 +545,10 @@ private void doMqttFlush( long budgetId, int reserved) { - replySeq = delegate.replySeq; + replySeq = messages.replySeq; - doFlush(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization, budgetId, reserved); + doFlush(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization, budgetId, reserved, + EMPTY_OCTETS); } private void doMqttAbort( @@ -503,7 +557,7 @@ private void doMqttAbort( { if (!MqttKafkaState.replyClosed(state)) { - replySeq = delegate.replySeq; + replySeq = messages.replySeq; state = MqttKafkaState.closeReply(state); doAbort(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization); @@ -516,7 +570,7 @@ private void doMqttEnd( { if (!MqttKafkaState.replyClosed(state)) { - replySeq = delegate.replySeq; + replySeq = messages.replySeq; state = MqttKafkaState.closeReply(state); doEnd(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization); @@ -530,11 +584,17 @@ private void doMqttWindow( int padding, int capabilities) { - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; + final long newInitialAck = retainAvailable ? 
Math.min(messages.initialAck, retained.initialAck) : messages.initialAck; + final int newInitialMax = retainAvailable ? Math.min(messages.initialMax, retained.initialMax) : messages.initialMax; + + if (initialAck != newInitialAck || initialMax != newInitialMax) + { + initialAck = newInitialAck; + initialMax = newInitialMax; - doWindow(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, padding, 0, capabilities); + doWindow(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, padding, 0, capabilities); + } } private void doMqttReset( @@ -549,20 +609,6 @@ private void doMqttReset( } } - private void addHeader( - OctetsFW key, - int value) - { - headerIntValueBuffer.putInt(0, value, ByteOrder.BIG_ENDIAN); - kafkaHeadersRW.item(h -> - { - h.nameLen(key.sizeof()); - h.name(key); - h.valueLen(4); - h.value(headerIntValueBuffer, 0, 4); - }); - } - private void addHeader( OctetsFW key, OctetsFW value) @@ -612,7 +658,7 @@ private void addHeader(String16FW key, String16FW value) } - final class KafkaProxy + final class KafkaMessagesProxy { private MessageConsumer kafka; private final long originId; @@ -632,7 +678,7 @@ final class KafkaProxy private int replyMax; private int replyPad; - private KafkaProxy( + private KafkaMessagesProxy( long originId, long routedId, MqttPublishProxy delegate) @@ -655,7 +701,7 @@ private void doKafkaBegin( state = MqttKafkaState.openingInitial(state); kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity); + traceId, authorization, affinity, kafkaTopic); } private void doKafkaData( @@ -916,6 +962,325 @@ private void doKafkaWindow( } } + final class KafkaRetainedProxy + { + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final 
MqttPublishProxy delegate; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaRetainedProxy( + long originId, + long routedId, + MqttPublishProxy delegate) + { + this.originId = originId; + this.routedId = routedId; + this.delegate = delegate; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + } + + private void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.openingInitial(state); + + kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, kafkaRetainedTopic); + } + + private void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doKafkaFlush( + long traceId, + long authorization, + long budgetId, + int reserved, + KafkaFlushExFW extension) + { + doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doKafkaEnd( + long traceId, + long sequence, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = 
MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void onKafkaMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onKafkaData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onKafkaEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onKafkaAbort(abort); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onKafkaFlush(flush); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onKafkaWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onKafkaReset(reset); + break; + } + } + + private void onKafkaBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + assert acknowledge >= replyAck; + + replySeq = sequence; + replyAck 
= acknowledge; + replyMax = maximum; + state = MqttKafkaState.openingReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttBegin(traceId, authorization, affinity); + } + + private void onKafkaData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + doKafkaReset(traceId); + delegate.doMqttAbort(traceId, authorization); + } + + private void onKafkaEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttEnd(traceId, authorization); + } + + private void onKafkaFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); + final long budgetId = flush.budgetId(); + final int reserved = flush.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + + assert replyAck <= replySeq; + + delegate.doMqttFlush(traceId, authorization, budgetId, reserved); + } + + private void onKafkaAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = 
MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttAbort(traceId, authorization); + } + + private void onKafkaWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + assert maximum >= delegate.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + state = MqttKafkaState.openInitial(state); + + assert initialAck <= initialSeq; + + delegate.doMqttWindow(authorization, traceId, budgetId, padding, capabilities); + } + + private void onKafkaReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + + delegate.initialAck = acknowledge; + + assert delegate.initialAck <= delegate.initialSeq; + + delegate.doMqttReset(traceId); + } + + private void doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + replyAck = delegate.replyAck; + replyMax = delegate.replyMax; + replyPad = delegate.replyPad; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, replyPad, capabilities); + } + } + private void doBegin( MessageConsumer receiver, @@ -1040,7 +1405,8 @@ private void doFlush( long 
traceId, long authorization, long budgetId, - int reserved) + int reserved, + Flyweight extension) { final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) .originId(originId) @@ -1053,6 +1419,7 @@ private void doFlush( .authorization(authorization) .budgetId(budgetId) .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) .build(); receiver.accept(flush.typeId(), flush.buffer(), flush.offset(), flush.sizeof()); @@ -1068,13 +1435,14 @@ private MessageConsumer newKafkaStream( int maximum, long traceId, long authorization, - long affinity) + long affinity, + String16FW topic) { final KafkaBeginExFW kafkaBeginEx = kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) .typeId(kafkaTypeId) .merged(m -> m.capabilities(c -> c.set(KafkaCapabilities.PRODUCE_ONLY)) - .topic(KAFKA_MESSAGES_TOPIC_NAME) + .topic(topic) .partitionsItem(p -> p.partitionId(-1).partitionOffset(-2L)) .ackMode(b -> b.set(KAFKA_DEFAULT_ACK_MODE))) .build(); diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java index 8e68a70bd1..bca0b7335b 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java @@ -15,14 +15,22 @@ package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPublishFlags.RETAIN; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttSubscribeFlags.NO_LOCAL; +import static 
io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttSubscribeFlags.RETAIN_AS_PUBLISHED; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttSubscribeFlags.SEND_RETAINED; +import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT; + +import java.util.ArrayList; +import java.util.List; import java.util.function.LongFunction; import java.util.function.LongUnaryOperator; import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; import org.agrona.collections.IntArrayList; +import org.agrona.collections.Long2ObjectHashMap; import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration; @@ -35,12 +43,14 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaConditionFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaEvaluation; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaHeaderFW; -import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaOffsetFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaOffsetType; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaSkip; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPayloadFormat; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttTopicFilterFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.Varuint32FW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.codec.MqttSubscribeMessageFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.AbortFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.BeginFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.DataFW; @@ -61,7 
+71,7 @@ import io.aklivity.zilla.runtime.engine.EngineContext; import io.aklivity.zilla.runtime.engine.binding.BindingHandler; import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; -import io.aklivity.zilla.runtime.engine.concurrent.Signaler; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; public class MqttKafkaSubscribeFactory implements BindingHandler { @@ -70,7 +80,10 @@ public class MqttKafkaSubscribeFactory implements BindingHandler private static final String MQTT_SINGLE_LEVEL_WILDCARD = "+"; private static final String MQTT_MULTI_LEVEL_WILDCARD = "#"; private static final int NO_LOCAL_FLAG = 1 << NO_LOCAL.ordinal(); - + private static final int SEND_RETAIN_FLAG = 1 << SEND_RETAINED.ordinal(); + private static final int RETAIN_FLAG = 1 << RETAIN.ordinal(); + private static final int RETAIN_AS_PUBLISHED_FLAG = 1 << RETAIN_AS_PUBLISHED.ordinal(); + public static final int DATA_FIN_FLAG = 0x03; private final OctetsFW emptyRO = new OctetsFW().wrap(new UnsafeBuffer(0L, 0), 0, 0); private final BeginFW beginRO = new BeginFW(); private final DataFW dataRO = new DataFW(); @@ -83,41 +96,45 @@ public class MqttKafkaSubscribeFactory implements BindingHandler private final EndFW.Builder endRW = new EndFW.Builder(); private final AbortFW.Builder abortRW = new AbortFW.Builder(); private final FlushFW.Builder flushRW = new FlushFW.Builder(); - private final OctetsFW.Builder octetsRW = new OctetsFW.Builder(); private final WindowFW windowRO = new WindowFW(); private final ResetFW resetRO = new ResetFW(); private final WindowFW.Builder windowRW = new WindowFW.Builder(); private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final MqttSubscribeMessageFW.Builder mqttSubscribeMessageRW = new MqttSubscribeMessageFW.Builder(); private final ExtensionFW extensionRO = new ExtensionFW(); private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); private final MqttFlushExFW mqttFlushExRO = new MqttFlushExFW(); - private final 
MqttDataExFW mqttDataExRO = new MqttDataExFW(); private final KafkaDataExFW kafkaDataExRO = new KafkaDataExFW(); private final KafkaHeaderFW kafkaHeaderRO = new KafkaHeaderFW(); + private final MqttSubscribeMessageFW mqttSubscribeMessageRO = new MqttSubscribeMessageFW(); private final MqttDataExFW.Builder mqttDataExRW = new MqttDataExFW.Builder(); private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder(); + private final Array32FW.Builder sendRetainedFiltersRW = + new Array32FW.Builder<>(new MqttTopicFilterFW.Builder(), new MqttTopicFilterFW()); + + private final Array32FW.Builder subscriptionIdsRW = + new Array32FW.Builder<>(new Varuint32FW.Builder(), new Varuint32FW()); private final MutableDirectBuffer writeBuffer; private final MutableDirectBuffer extBuffer; + private final MutableDirectBuffer subscriptionIdsBuffer; + private final MutableDirectBuffer retainFilterBuffer; private final BindingHandler streamFactory; + private final BufferPool bufferPool; private final LongUnaryOperator supplyInitialId; private final LongUnaryOperator supplyReplyId; private final int mqttTypeId; private final int kafkaTypeId; - private final Signaler signaler; private final LongFunction supplyBinding; private final MqttKafkaHeaderHelper helper; - - private String clientId; - private Array32FW filters; - private IntArrayList subscriptionIds = new IntArrayList(); - private String16FW kafkaMessagesTopicName; + private final String16FW kafkaMessagesTopicName; + private final String16FW kafkaRetainedTopicName; public MqttKafkaSubscribeFactory( MqttKafkaConfiguration config, @@ -128,13 +145,16 @@ public MqttKafkaSubscribeFactory( this.kafkaTypeId = context.supplyTypeId(KAFKA_TYPE_NAME); this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); - this.signaler = 
context.signaler(); + this.subscriptionIdsBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.retainFilterBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.streamFactory = context.streamFactory(); + this.bufferPool = context.bufferPool(); this.supplyInitialId = context::supplyInitialId; this.supplyReplyId = context::supplyReplyId; this.supplyBinding = supplyBinding; this.helper = new MqttKafkaHeaderHelper(); - this.kafkaMessagesTopicName = new String16FW(config.kafkaMessagesTopic()); + this.kafkaMessagesTopicName = new String16FW(config.messagesTopic()); + this.kafkaRetainedTopicName = new String16FW(config.retainedMessagesTopic()); } @Override @@ -151,14 +171,6 @@ public MessageConsumer newStream( final long initialId = begin.streamId(); final long authorization = begin.authorization(); - final OctetsFW extension = begin.extension(); - final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); - - assert mqttBeginEx.kind() == MqttBeginExFW.KIND_SUBSCRIBE; - final MqttSubscribeBeginExFW mqttSubscribeBeginEx = mqttBeginEx.subscribe(); - clientId = mqttSubscribeBeginEx.clientId().asString(); - filters = mqttSubscribeBeginEx.filters(); - final MqttKafkaBindingConfig binding = supplyBinding.apply(routedId); final MqttKafkaRouteConfig resolved = binding != null ? 
binding.resolve(authorization) : null; @@ -182,7 +194,8 @@ private final class MqttSubscribeProxy private final long routedId; private final long initialId; private final long replyId; - private final KafkaProxy delegate; + private final KafkaMessagesProxy messages; + private final KafkaRetainedProxy retained; private int state; @@ -194,6 +207,15 @@ private final class MqttSubscribeProxy private long replyAck; private int replyMax; private int replyPad; + private long replyBud; + private int mqttSharedBudget; + + private final IntArrayList messagesSubscriptionIds; + private final IntArrayList retainedSubscriptionIds; + private final Long2ObjectHashMap retainAsPublished; + private final List retainedSubscriptions; + private String16FW clientId; + private boolean retainAvailable; private MqttSubscribeProxy( MessageConsumer mqtt, @@ -207,7 +229,12 @@ private MqttSubscribeProxy( this.routedId = routedId; this.initialId = initialId; this.replyId = supplyReplyId.applyAsLong(initialId); - this.delegate = new KafkaProxy(originId, resolvedId, this); + this.messagesSubscriptionIds = new IntArrayList(); + this.retainedSubscriptionIds = new IntArrayList(); + this.retainedSubscriptions = new ArrayList<>(); + this.retainAsPublished = new Long2ObjectHashMap<>(); + this.messages = new KafkaMessagesProxy(originId, resolvedId, this); + this.retained = new KafkaRetainedProxy(originId, resolvedId, this); } private void onMqttMessage( @@ -268,7 +295,46 @@ private void onMqttBegin( assert initialAck <= initialSeq; - delegate.doKafkaBegin(traceId, authorization, affinity); + final OctetsFW extension = begin.extension(); + final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); + + assert mqttBeginEx.kind() == MqttBeginExFW.KIND_SUBSCRIBE; + final MqttSubscribeBeginExFW mqttSubscribeBeginEx = mqttBeginEx.subscribe(); + + clientId = newString16FW(mqttSubscribeBeginEx.clientId()); + + Array32FW filters = mqttSubscribeBeginEx.filters(); + filters.forEach(filter -> + { + int 
subscriptionId = (int) filter.subscriptionId(); + if (!messagesSubscriptionIds.contains(subscriptionId)) + { + messagesSubscriptionIds.add(subscriptionId); + } + if ((filter.flags() & SEND_RETAIN_FLAG) != 0) + { + retainAvailable = true; + } + }); + + final List retainedFilters = new ArrayList<>(); + if (retainAvailable) + { + filters.forEach(filter -> + { + final boolean sendRetained = (filter.flags() & SEND_RETAIN_FLAG) != 0; + if (sendRetained) + { + retainedFilters.add(new Subscription( + (int) filter.subscriptionId(), newString16FW(filter.pattern()), filter.qos(), filter.flags())); + } + }); + } + if (retainAvailable && !retainedFilters.isEmpty()) + { + retained.doKafkaBegin(traceId, authorization, affinity, retainedFilters); + } + messages.doKafkaBegin(traceId, authorization, affinity, filters); } private void onMqttFlush( @@ -282,11 +348,12 @@ private void onMqttFlush( final int reserved = flush.reserved(); assert acknowledge <= sequence; - assert sequence >= replySeq; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; - replySeq = sequence; + initialSeq = sequence; - assert replyAck <= replySeq; + assert initialAck <= initialSeq; final OctetsFW extension = flush.extension(); final MqttFlushExFW mqttFlushEx = extension.get(mqttFlushExRO::tryWrap); @@ -294,40 +361,86 @@ private void onMqttFlush( assert mqttFlushEx.kind() == MqttFlushExFW.KIND_SUBSCRIBE; final MqttSubscribeFlushExFW mqttSubscribeFlushEx = mqttFlushEx.subscribe(); - filters = mqttSubscribeFlushEx.filters(); - subscriptionIds.clear(); - + Array32FW filters = mqttSubscribeFlushEx.filters(); + messagesSubscriptionIds.clear(); final KafkaFlushExFW kafkaFlushEx = kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> + .typeId(kafkaTypeId) + .merged(m -> + { + m.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); + filters.forEach(filter -> { - m.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); - 
filters.forEach(filter -> - - m.filtersItem(f -> + if ((filter.flags() & SEND_RETAIN_FLAG) != 0) + { + retainAvailable = true; + } + m.filtersItem(f -> + { + final int subscriptionId = (int) filter.subscriptionId(); + f.conditionsItem(ci -> { - f.conditionsItem(ci -> - { - subscriptionIds.add((int) filter.subscriptionId()); - buildHeaders(ci, filter.pattern().asString()); - }); - boolean noLocal = (filter.flags() & NO_LOCAL_FLAG) != 0; - if (noLocal) + if (!messagesSubscriptionIds.contains(subscriptionId)) { - final DirectBuffer valueBuffer = new String16FW(clientId).value(); - f.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> - h.nameLen(helper.kafkaLocalHeaderName.sizeof()) - .name(helper.kafkaLocalHeaderName) - .valueLen(valueBuffer.capacity()) - .value(valueBuffer, 0, valueBuffer.capacity()))))); + messagesSubscriptionIds.add(subscriptionId); } - })); - }) - .build(); + buildHeaders(ci, filter.pattern().asString()); + }); + + final boolean noLocal = (filter.flags() & NO_LOCAL_FLAG) != 0; + if (noLocal) + { + final DirectBuffer valueBuffer = clientId.value(); + f.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> + h.nameLen(helper.kafkaLocalHeaderName.sizeof()) + .name(helper.kafkaLocalHeaderName) + .valueLen(valueBuffer.capacity()) + .value(valueBuffer, 0, valueBuffer.capacity()))))); + } + }); + }); + }) + .build(); + + messages.doKafkaFlush(traceId, authorization, budgetId, reserved, kafkaFlushEx); - delegate.doKafkaFlush(traceId, authorization, budgetId, reserved, kafkaFlushEx); + if (retainAvailable) + { + final List retainedFilters = new ArrayList<>(); + filters.forEach(filter -> + { + final boolean sendRetained = (filter.flags() & SEND_RETAIN_FLAG) != 0; + if (sendRetained) + { + retainedFilters.add(new Subscription( + (int) filter.subscriptionId(), newString16FW(filter.pattern()), filter.qos(), filter.flags())); + final boolean rap = (filter.flags() & RETAIN_AS_PUBLISHED_FLAG) != 0; + retainAsPublished.put((int) 
filter.subscriptionId(), rap); + } + }); + + retainedSubscriptions.removeIf(rf -> !filters.anyMatch(f -> f.pattern().equals(rf.filter))); + if (!retainedFilters.isEmpty()) + { + if (MqttKafkaState.initialOpened(retained.state) && !MqttKafkaState.initialClosed(retained.state)) + { + retained.doKafkaFlush(traceId, authorization, budgetId, reserved, retainedFilters); + } + else + { + final List newRetainedFilters = new ArrayList<>(); + retainedFilters.forEach(subscription -> + { + if (!retainedSubscriptions.contains(subscription)) + { + newRetainedFilters.add(subscription); + } + }); + retained.doKafkaBegin(traceId, authorization, 0, newRetainedFilters); + } + } + } } private void onMqttData( @@ -346,7 +459,12 @@ private void onMqttData( assert initialAck <= initialSeq; doMqttReset(traceId); - delegate.doKafkaAbort(traceId, authorization); + + messages.doKafkaAbort(traceId, authorization); + if (retainAvailable) + { + retained.doKafkaAbort(traceId, authorization); + } } @@ -366,7 +484,12 @@ private void onMqttEnd( assert initialAck <= initialSeq; - delegate.doKafkaEnd(traceId, initialSeq, authorization); + + messages.doKafkaEnd(traceId, initialSeq, authorization); + if (retainAvailable) + { + retained.doKafkaEnd(traceId, initialSeq, authorization); + } } private void onMqttAbort( @@ -385,7 +508,12 @@ private void onMqttAbort( assert initialAck <= initialSeq; - delegate.doKafkaAbort(traceId, authorization); + + messages.doKafkaAbort(traceId, authorization); + if (retainAvailable) + { + retained.doKafkaAbort(traceId, authorization); + } } private void onMqttReset( @@ -407,7 +535,12 @@ private void onMqttReset( assert replyAck <= replySeq; - delegate.doKafkaReset(traceId); + + messages.doKafkaReset(traceId); + if (retainAvailable) + { + retained.doKafkaReset(traceId); + } } private void onMqttWindow( @@ -431,10 +564,21 @@ private void onMqttWindow( replyMax = maximum; replyPad = padding; state = MqttKafkaState.openReply(state); + this.replyBud = window.budgetId(); assert 
replyAck <= replySeq; - delegate.doKafkaWindow(traceId, authorization, budgetId, padding, capabilities); + mqttSharedBudget = replyMax - (int)(replySeq - replyAck); + + if (retainAvailable) + { + retained.doKafkaWindow(traceId, authorization, budgetId, padding, capabilities); + } + else if (messages.messageSlotOffset != messages.messageSlotLimit) + { + messages.flushData(traceId, authorization, budgetId); + } + messages.doKafkaWindow(traceId, authorization, budgetId, padding, capabilities); } private void doMqttBegin( @@ -442,9 +586,6 @@ private void doMqttBegin( long authorization, long affinity) { - replySeq = delegate.replySeq; - replyAck = delegate.replyAck; - replyMax = delegate.replyMax; state = MqttKafkaState.openingReply(state); doBegin(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, @@ -474,7 +615,7 @@ private void doMqttFlush( long budgetId, int reserved) { - replySeq = delegate.replySeq; + replySeq = messages.replySeq; doFlush(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization, budgetId, reserved, emptyRO); @@ -486,7 +627,7 @@ private void doMqttAbort( { if (!MqttKafkaState.replyClosed(state)) { - replySeq = delegate.replySeq; + replySeq = messages.replySeq; state = MqttKafkaState.closeReply(state); doAbort(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization); @@ -499,7 +640,7 @@ private void doMqttEnd( { if (!MqttKafkaState.replyClosed(state)) { - replySeq = delegate.replySeq; + replySeq = messages.replySeq; state = MqttKafkaState.closeReply(state); doEnd(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization); @@ -513,8 +654,8 @@ private void doMqttWindow( int padding, int capabilities) { - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; + initialAck = messages.initialAck; + initialMax = messages.initialMax; doWindow(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, 
authorization, budgetId, padding, 0, capabilities); @@ -530,16 +671,31 @@ private void doMqttReset( doReset(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId); } } + + public int replyPendingAck() + { + return (int)(replySeq - replyAck); + } + + private int replyWindow() + { + return replyMax - replyPendingAck(); + } } - final class KafkaProxy + final class KafkaMessagesProxy { private MessageConsumer kafka; private final long originId; private final long routedId; private final long initialId; private final long replyId; - private final MqttSubscribeProxy delegate; + private final MqttSubscribeProxy mqtt; + + private int dataSlot = NO_SLOT; + private int messageSlotLimit; + private int messageSlotOffset; + private int messageSlotReserved; private int state; @@ -552,14 +708,14 @@ final class KafkaProxy private int replyMax; private int replyPad; - private KafkaProxy( + private KafkaMessagesProxy( long originId, long routedId, - MqttSubscribeProxy delegate) + MqttSubscribeProxy mqtt) { this.originId = originId; this.routedId = routedId; - this.delegate = delegate; + this.mqtt = mqtt; this.initialId = supplyInitialId.applyAsLong(routedId); this.replyId = supplyReplyId.applyAsLong(initialId); } @@ -567,15 +723,19 @@ private KafkaProxy( private void doKafkaBegin( long traceId, long authorization, - long affinity) + long affinity, + Array32FW filters) { - initialSeq = delegate.initialSeq; - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; - state = MqttKafkaState.openingInitial(state); + if (!MqttKafkaState.initialOpening(state)) + { + initialSeq = mqtt.initialSeq; + initialAck = mqtt.initialAck; + initialMax = mqtt.initialMax; + state = MqttKafkaState.openingInitial(state); - kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity); + kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, 
initialAck, initialMax, + traceId, authorization, affinity, mqtt.clientId, kafkaMessagesTopicName, filters, KafkaOffsetType.LIVE); + } } private void doKafkaFlush( @@ -585,7 +745,7 @@ private void doKafkaFlush( int reserved, Flyweight extension) { - initialSeq = delegate.initialSeq; + initialSeq = mqtt.initialSeq; doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization, budgetId, reserved, extension); @@ -596,11 +756,11 @@ private void doKafkaEnd( long sequence, long authorization) { - if (!MqttKafkaState.initialClosed(state)) + if (MqttKafkaState.initialOpened(state) && !MqttKafkaState.initialClosed(state)) { - initialSeq = delegate.initialSeq; - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; + initialSeq = mqtt.initialSeq; + initialAck = mqtt.initialAck; + initialMax = mqtt.initialMax; state = MqttKafkaState.closeInitial(state); doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); @@ -611,11 +771,11 @@ private void doKafkaAbort( long traceId, long authorization) { - if (!MqttKafkaState.initialClosed(state)) + if (MqttKafkaState.initialOpened(state) && !MqttKafkaState.initialClosed(state)) { - initialSeq = delegate.initialSeq; - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; + initialSeq = mqtt.initialSeq; + initialAck = mqtt.initialAck; + initialMax = mqtt.initialMax; state = MqttKafkaState.closeInitial(state); doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); @@ -682,7 +842,8 @@ private void onKafkaBegin( assert replyAck <= replySeq; - delegate.doMqttBegin(traceId, authorization, affinity); + mqtt.doMqttBegin(traceId, authorization, affinity); + doKafkaWindow(traceId, authorization, mqtt.replyBud, mqtt.replyPad, 0); } private void onKafkaData( @@ -705,11 +866,12 @@ private void onKafkaData( if (replySeq > replyAck + replyMax) { doKafkaReset(traceId); - 
delegate.doMqttAbort(traceId, authorization); + mqtt.doMqttAbort(traceId, authorization); } else { final int flags = data.flags(); + final int length = data.length(); final OctetsFW payload = data.payload(); final OctetsFW extension = data.extension(); final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); @@ -717,10 +879,7 @@ private void onKafkaData( dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; final KafkaMergedDataExFW kafkaMergedDataEx = kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; - final Array32FW progress = kafkaMergedDataEx != null ? kafkaMergedDataEx.progress() : null; final OctetsFW key = kafkaMergedDataEx != null ? kafkaMergedDataEx.key().value() : null; - final Array32FW headers = kafkaMergedDataEx != null ? kafkaMergedDataEx.headers() : null; - //TODO: data final long filters = kafkaMergedDataEx != null ? kafkaMergedDataEx.filters() : 0; if (key != null) @@ -728,19 +887,25 @@ private void onKafkaData( String topicName = kafkaMergedDataEx.key().value() .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)); helper.visit(kafkaMergedDataEx); - final Flyweight mqttSubscribeDataEx = mqttDataExRW.wrap(extBuffer, 0, extBuffer.capacity()) + + final MqttDataExFW mqttSubscribeDataEx = mqttDataExRW.wrap(extBuffer, 0, extBuffer.capacity()) .typeId(mqttTypeId) .subscribe(b -> { b.topic(topicName); - for (int i = 0; i < subscriptionIds.size(); i++) + + int flag = 0; + subscriptionIdsRW.wrap(subscriptionIdsBuffer, 0, subscriptionIdsBuffer.capacity()); + for (int i = 0; i < mqtt.messagesSubscriptionIds.size(); i++) { if (((filters >> i) & 1) == 1) { - int index = i; - b.subscriptionIdsItem(c -> c.set(subscriptionIds.get(index))); + long subscriptionId = mqtt.messagesSubscriptionIds.get(i); + subscriptionIdsRW.item(si -> si.set((int) subscriptionId)); } } + b.flags(flag); + b.subscriptionIds(subscriptionIdsRW.build()); if (helper.timeout != -1) { 
b.expiryInterval(helper.timeout / 1000); @@ -780,11 +945,76 @@ private void onKafkaData( }); }).build(); - delegate.doMqttData(traceId, authorization, budgetId, reserved, flags, payload, mqttSubscribeDataEx); + if (!MqttKafkaState.initialOpened(mqtt.retained.state) || + MqttKafkaState.replyClosed(mqtt.retained.state)) + { + mqtt.doMqttData(traceId, authorization, budgetId, reserved, flags, payload, mqttSubscribeDataEx); + mqtt.mqttSharedBudget -= length; + } + else + { + if (dataSlot == NO_SLOT) + { + dataSlot = bufferPool.acquire(initialId); + } + + if (dataSlot == NO_SLOT) + { + cleanup(traceId, authorization); + } + + + final MutableDirectBuffer dataBuffer = bufferPool.buffer(dataSlot); + Flyweight message = mqttSubscribeMessageRW.wrap(dataBuffer, messageSlotLimit, dataBuffer.capacity()) + .extension(mqttSubscribeDataEx.buffer(), mqttSubscribeDataEx.offset(), mqttSubscribeDataEx.sizeof()) + .payload(payload) + .build(); + + messageSlotLimit = message.limit(); + messageSlotReserved += reserved; + } + } + } + } + + private void flushData( + long traceId, + long authorization, + long budgetId) + { + int length = Math.max(Math.min(mqtt.replyWindow() - mqtt.replyPad, messageSlotLimit - messageSlotOffset), 0); + int reserved = length + mqtt.replyPad; + if (length > 0) + { + final MutableDirectBuffer dataBuffer = bufferPool.buffer(dataSlot); + // TODO: data fragmentation + while (messageSlotOffset != length) + { + final MqttSubscribeMessageFW message = mqttSubscribeMessageRO.wrap(dataBuffer, messageSlotOffset, + dataBuffer.capacity()); + mqtt.doMqttData(traceId, authorization, budgetId, reserved, DATA_FIN_FLAG, message.payload(), + message.extension()); + + messageSlotOffset += message.sizeof(); + } + if (messageSlotOffset == messageSlotLimit) + { + bufferPool.release(dataSlot); + dataSlot = NO_SLOT; + messageSlotLimit = 0; + messageSlotOffset = 0; } } } + private void cleanup( + long traceId, + long authorization) + { + mqtt.doMqttAbort(traceId, authorization); + 
doKafkaAbort(traceId, authorization); + } + private void onKafkaEnd( EndFW end) { @@ -801,7 +1031,7 @@ private void onKafkaEnd( assert replyAck <= replySeq; - delegate.doMqttEnd(traceId, authorization); + mqtt.doMqttEnd(traceId, authorization); } private void onKafkaFlush( @@ -821,7 +1051,7 @@ private void onKafkaFlush( assert replyAck <= replySeq; - delegate.doMqttFlush(traceId, authorization, budgetId, reserved); + mqtt.doMqttFlush(traceId, authorization, budgetId, reserved); } private void onKafkaAbort( @@ -840,7 +1070,7 @@ private void onKafkaAbort( assert replyAck <= replySeq; - delegate.doMqttAbort(traceId, authorization); + mqtt.doMqttAbort(traceId, authorization); } private void onKafkaWindow( @@ -856,8 +1086,8 @@ private void onKafkaWindow( final int capabilities = window.capabilities(); assert acknowledge <= sequence; - assert acknowledge >= delegate.initialAck; - assert maximum >= delegate.initialMax; + assert acknowledge >= mqtt.initialAck; + assert maximum >= mqtt.initialMax; initialAck = acknowledge; initialMax = maximum; @@ -865,7 +1095,7 @@ private void onKafkaWindow( assert initialAck <= initialSeq; - delegate.doMqttWindow(authorization, traceId, budgetId, padding, capabilities); + mqtt.doMqttWindow(authorization, traceId, budgetId, padding, capabilities); } private void onKafkaReset( @@ -876,19 +1106,19 @@ private void onKafkaReset( final long traceId = reset.traceId(); assert acknowledge <= sequence; - assert acknowledge >= delegate.initialAck; + assert acknowledge >= mqtt.initialAck; - delegate.initialAck = acknowledge; + mqtt.initialAck = acknowledge; - assert delegate.initialAck <= delegate.initialSeq; + assert mqtt.initialAck <= mqtt.initialSeq; - delegate.doMqttReset(traceId); + mqtt.doMqttReset(traceId); } private void doKafkaReset( long traceId) { - if (!MqttKafkaState.replyClosed(state)) + if (MqttKafkaState.initialOpened(state) && !MqttKafkaState.replyClosed(state)) { state = MqttKafkaState.closeReply(state); @@ -903,123 +1133,619 @@ 
private void doKafkaWindow( int padding, int capabilities) { - replyAck = delegate.replyAck; - replyMax = delegate.replyMax; - replyPad = delegate.replyPad; + if (MqttKafkaState.replyOpening(state)) + { + final int replyWin = replyMax - (int) (replySeq - replyAck); + final int newReplyWin = mqtt.mqttSharedBudget; - doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, budgetId, padding, replyPad, capabilities); - } - } + final int replyCredit = newReplyWin - replyWin; + if (replyCredit > 0) + { + final int replyNoAck = (int) (replySeq - replyAck - messageSlotReserved); + final int replyAcked = Math.min(replyNoAck, replyCredit); + replyAck += replyAcked; + assert replyAck <= replySeq; - private void doBegin( - MessageConsumer receiver, - long originId, - long routedId, - long streamId, - long sequence, - long acknowledge, - int maximum, - long traceId, - long authorization, - long affinity) - { - final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) - .originId(originId) - .routedId(routedId) - .streamId(streamId) - .sequence(sequence) - .acknowledge(acknowledge) - .maximum(maximum) - .traceId(traceId) - .authorization(authorization) - .affinity(affinity) - .build(); + replyMax = newReplyWin + (int) (replySeq - replyAck - messageSlotReserved); + assert replyMax >= 0; - receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + if (messageSlotReserved > 0 && dataSlot == NO_SLOT) + { + replyAck += messageSlotReserved; + messageSlotReserved = 0; + } + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, replyPad, capabilities); + } + } + } } - private void doData( - MessageConsumer receiver, - long originId, - long routedId, - long streamId, - long sequence, - long acknowledge, - int maximum, - long traceId, - long authorization, - long budgetId, - int flags, - int reserved, - OctetsFW payload, - Flyweight 
extension) + final class KafkaRetainedProxy { - final DataFW frame = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) - .originId(originId) - .routedId(routedId) - .streamId(streamId) - .sequence(sequence) - .acknowledge(acknowledge) - .maximum(maximum) - .traceId(traceId) - .authorization(authorization) - .flags(flags) - .budgetId(budgetId) - .reserved(reserved) - .payload(payload) - .extension(extension.buffer(), extension.offset(), extension.sizeof()) - .build(); + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final MqttSubscribeProxy mqtt; - receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); - } + private int state; - private void doEnd( - MessageConsumer receiver, - long originId, - long routedId, - long streamId, - long sequence, - long acknowledge, - int maximum, - long traceId, - long authorization) - { - final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) - .originId(originId) - .routedId(routedId) - .streamId(streamId) - .sequence(sequence) - .acknowledge(acknowledge) - .maximum(maximum) - .traceId(traceId) - .authorization(authorization) - .build(); + private long initialSeq; + private long initialAck; + private int initialMax; - receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); - } + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; - private void doAbort( - MessageConsumer receiver, - long originId, - long routedId, - long streamId, - long sequence, - long acknowledge, - int maximum, - long traceId, - long authorization) - { - final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) - .originId(originId) - .routedId(routedId) - .streamId(streamId) - .sequence(sequence) - .acknowledge(acknowledge) - .maximum(maximum) - .traceId(traceId) - .authorization(authorization) + private KafkaRetainedProxy( + 
long originId, + long routedId, + MqttSubscribeProxy mqtt) + { + this.originId = originId; + this.routedId = routedId; + this.mqtt = mqtt; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + } + + private void doKafkaBegin( + long traceId, + long authorization, + long affinity, + List newRetainedFilters) + { + state = 0; + replySeq = 0; + replyAck = 0; + replyMax = 0; + + sendRetainedFiltersRW.wrap(retainFilterBuffer, 0, retainFilterBuffer.capacity()); + + newRetainedFilters.forEach(f -> + { + final int subscriptionId = f.id; + if (!mqtt.retainedSubscriptionIds.contains(subscriptionId)) + { + mqtt.retainedSubscriptionIds.add(subscriptionId); + } + sendRetainedFiltersRW.item(fb -> fb + .subscriptionId(subscriptionId).qos(f.qos).flags(f.flags).pattern(f.filter)); + final boolean rap = (f.flags & RETAIN_AS_PUBLISHED_FLAG) != 0; + mqtt.retainAsPublished.put(f.id, rap); + }); + mqtt.retainedSubscriptions.addAll(newRetainedFilters); + + Array32FW retainedFilters = sendRetainedFiltersRW.build(); + + initialSeq = mqtt.initialSeq; + initialAck = mqtt.initialAck; + initialMax = mqtt.initialMax; + + state = MqttKafkaState.openingInitial(state); + + kafka = + newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, mqtt.clientId, kafkaRetainedTopicName, + retainedFilters, KafkaOffsetType.HISTORICAL); + } + + private void doKafkaFlush( + long traceId, + long authorization, + long budgetId, + int reserved, + List retainedFiltersList) + { + initialSeq = mqtt.initialSeq; + + sendRetainedFiltersRW.wrap(retainFilterBuffer, 0, retainFilterBuffer.capacity()); + + retainedFiltersList.forEach(f -> + { + final int subscriptionId = f.id; + if (!mqtt.retainedSubscriptionIds.contains(subscriptionId)) + { + mqtt.retainedSubscriptionIds.add(subscriptionId); + } + sendRetainedFiltersRW.item(fb -> fb + 
.subscriptionId(subscriptionId).qos(f.qos).flags(f.flags).pattern(f.filter)); + final boolean rap = (f.flags & RETAIN_AS_PUBLISHED_FLAG) != 0; + mqtt.retainAsPublished.put(f.id, rap); + }); + + Array32FW retainedFilters = sendRetainedFiltersRW.build(); + + final KafkaFlushExFW retainedKafkaFlushEx = + kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> + { + m.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); + retainedFilters.forEach(filter -> + { + m.filtersItem(f -> + { + final int subscriptionId = (int) filter.subscriptionId(); + f.conditionsItem(ci -> + { + if (!mqtt.messagesSubscriptionIds.contains(subscriptionId)) + { + mqtt.messagesSubscriptionIds.add(subscriptionId); + } + buildHeaders(ci, filter.pattern().asString()); + }); + }); + }); + }) + .build(); + + doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, retainedKafkaFlushEx); + } + + private void doKafkaEnd( + long traceId, + long sequence, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = mqtt.initialSeq; + initialAck = mqtt.initialAck; + initialMax = mqtt.initialMax; + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = mqtt.initialSeq; + initialAck = mqtt.initialAck; + initialMax = mqtt.initialMax; + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void onKafkaMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, 
index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onKafkaData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onKafkaEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onKafkaAbort(abort); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onKafkaFlush(flush); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onKafkaWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onKafkaReset(reset); + break; + } + } + + private void onKafkaBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + assert acknowledge >= replyAck; + + replySeq = replySeq + sequence; + replyAck = replySeq + acknowledge; + replyMax = maximum; + + state = MqttKafkaState.openingReply(state); + + assert replyAck <= replySeq; + + mqtt.doMqttBegin(traceId, authorization, affinity); + doKafkaWindow(traceId, authorization, mqtt.replyBud, mqtt.replyPad, 0); + } + + private void onKafkaData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + + if (replySeq > replyAck + 
replyMax) + { + doKafkaReset(traceId); + mqtt.doMqttAbort(traceId, authorization); + } + else + { + final int flags = data.flags(); + final int length = data.length(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final KafkaDataExFW kafkaDataEx = + dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; + final KafkaMergedDataExFW kafkaMergedDataEx = + kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; + final OctetsFW key = kafkaMergedDataEx != null ? kafkaMergedDataEx.key().value() : null; + final long filters = kafkaMergedDataEx != null ? kafkaMergedDataEx.filters() : 0; + + if (key != null) + { + String topicName = kafkaMergedDataEx.key().value() + .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)); + helper.visit(kafkaMergedDataEx); + final Flyweight mqttSubscribeDataEx = mqttDataExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(mqttTypeId) + .subscribe(b -> + { + b.topic(topicName); + + int flag = 0; + subscriptionIdsRW.wrap(subscriptionIdsBuffer, 0, subscriptionIdsBuffer.capacity()); + for (int i = 0; i < mqtt.retainedSubscriptionIds.size(); i++) + { + if (((filters >> i) & 1) == 1) + { + long subscriptionId = mqtt.retainedSubscriptionIds.get(i); + if (mqtt.retainAsPublished.getOrDefault(subscriptionId, false)) + { + flag |= RETAIN_FLAG; + } + subscriptionIdsRW.item(si -> si.set((int) subscriptionId)); + } + } + b.flags(flag); + b.subscriptionIds(subscriptionIdsRW.build()); + if (helper.timeout != -1) + { + b.expiryInterval(helper.timeout / 1000); + } + if (helper.contentType != null) + { + b.contentType( + helper.contentType.buffer(), helper.contentType.offset(), helper.contentType.sizeof()); + } + if (helper.format != null) + { + b.format(f -> f.set(MqttPayloadFormat.valueOf(helper.format))); + } + if (helper.replyTo != null) + { + 
b.responseTopic( + helper.replyTo.buffer(), helper.replyTo.offset(), helper.replyTo.sizeof()); + } + if (helper.correlation != null) + { + b.correlation(c -> c.bytes(helper.correlation)); + } + + final DirectBuffer buffer = kafkaMergedDataEx.buffer(); + final int limit = kafkaMergedDataEx.limit(); + helper.userPropertiesOffsets.forEach(o -> + { + final KafkaHeaderFW header = kafkaHeaderRO.wrap(buffer, o, limit); + final OctetsFW name = header.name(); + final OctetsFW value = header.value(); + if (value != null) + { + b.propertiesItem(pi -> pi + .key(name.buffer(), name.offset(), name.sizeof()) + .value(value.buffer(), value.offset(), value.sizeof())); + } + }); + }).build(); + + mqtt.doMqttData(traceId, authorization, budgetId, reserved, flags, payload, mqttSubscribeDataEx); + + mqtt.mqttSharedBudget -= length; + } + } + } + + private void onKafkaEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + mqtt.messages.flushData(traceId, authorization, mqtt.replyBud); + } + + private void onKafkaFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); + final long budgetId = flush.budgetId(); + final int reserved = flush.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + + assert replyAck <= replySeq; + + mqtt.retainedSubscriptionIds.clear(); + doKafkaEnd(traceId, sequence, authorization); + } + + private void onKafkaAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = 
abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + mqtt.doMqttAbort(traceId, authorization); + } + + private void onKafkaWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert acknowledge >= mqtt.initialAck; + assert maximum >= mqtt.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + state = MqttKafkaState.openInitial(state); + + assert initialAck <= initialSeq; + + mqtt.doMqttWindow(authorization, traceId, budgetId, padding, capabilities); + } + + private void onKafkaReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= mqtt.initialAck; + + mqtt.initialAck = acknowledge; + + assert mqtt.initialAck <= mqtt.initialSeq; + + mqtt.doMqttReset(traceId); + } + + private void doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + if (MqttKafkaState.replyOpening(state) && + !MqttKafkaState.replyClosing(state)) + { + final int replyWin = replyMax - (int) (replySeq - replyAck); + final int newReplyWin = mqtt.mqttSharedBudget; + + final int replyCredit 
= newReplyWin - replyWin; + + if (replyCredit > 0) + { + final int replyNoAck = (int) (replySeq - replyAck); + final int replyAcked = Math.min(replyNoAck, replyCredit); + + replyAck += replyAcked; + assert replyAck <= replySeq; + + replyMax = newReplyWin + (int) (replySeq - replyAck); + assert replyMax >= 0; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, replyPad, capabilities); + } + } + } + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int flags, + int reserved, + OctetsFW payload, + Flyweight extension) + { + final DataFW frame = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long 
sequence, + long acknowledge, + int maximum, + long traceId, + long authorization) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) .build(); receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); @@ -1066,7 +1792,11 @@ private MessageConsumer newKafkaStream( int maximum, long traceId, long authorization, - long affinity) + long affinity, + String16FW clientId, + String16FW topic, + Array32FW filters, + KafkaOffsetType offsetType) { final KafkaBeginExFW kafkaBeginEx = kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) @@ -1074,23 +1804,19 @@ private MessageConsumer newKafkaStream( .merged(m -> { m.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); - m.topic(kafkaMessagesTopicName); + m.topic(topic); m.partitionsItem(p -> - p.partitionId(-1) - .partitionOffset(-1L)); + p.partitionId(offsetType.value()) + .partitionOffset(offsetType.value())); filters.forEach(filter -> m.filtersItem(f -> { - f.conditionsItem(ci -> - { - subscriptionIds.add((int) filter.subscriptionId()); - buildHeaders(ci, filter.pattern().asString()); - }); + f.conditionsItem(ci -> buildHeaders(ci, filter.pattern().asString())); boolean noLocal = (filter.flags() & 
NO_LOCAL_FLAG) != 0; if (noLocal) { - final DirectBuffer valueBuffer = new String16FW(clientId).value(); + final DirectBuffer valueBuffer = clientId.value(); f.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> h.nameLen(helper.kafkaLocalHeaderName.sizeof()) .name(helper.kafkaLocalHeaderName) @@ -1209,4 +1935,30 @@ private void doReset( sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); } + + private String16FW newString16FW( + String16FW value) + { + return new String16FW().wrap(value.buffer(), value.offset(), value.limit()); + } + + private static final class Subscription + { + private final int id; + private final String16FW filter; + private final int qos; + private final int flags; + + Subscription( + int id, + String16FW filter, + int qos, + int flags) + { + this.id = id; + this.filter = filter; + this.qos = qos; + this.flags = flags; + } + } } diff --git a/incubator/binding-mqtt-kafka/src/main/zilla/internal.idl b/incubator/binding-mqtt-kafka/src/main/zilla/internal.idl new file mode 100644 index 0000000000..a77267637b --- /dev/null +++ b/incubator/binding-mqtt-kafka/src/main/zilla/internal.idl @@ -0,0 +1,28 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + + scope internal + { + scope codec + { + struct MqttSubscribeMessage + { + uint32 extensionLength; + octets[extensionLength] extension; + uint32 payloadLength; + octets[payloadLength] payload; + } + } + } diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java new file mode 100644 index 0000000000..e059806edd --- /dev/null +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java @@ -0,0 +1,35 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal; + + +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.KAFKA_MESSAGES_TOPIC; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.KAFKA_RETAINED_MESSAGES_TOPIC; +import static org.junit.Assert.assertEquals; + +import org.junit.Test; + +public class MqttKafkaConfigurationTest +{ + public static final String KAFKA_MESSAGES_TOPIC_NAME = "zilla.binding.mqtt.kafka.messages.topic"; + public static final String KAFKA_RETAINED_MESSAGES_TOPIC_NAME = "zilla.binding.mqtt.kafka.retained.messages.topic"; + + @Test + public void shouldVerifyConstants() + { + assertEquals(KAFKA_MESSAGES_TOPIC.name(), KAFKA_MESSAGES_TOPIC_NAME); + assertEquals(KAFKA_RETAINED_MESSAGES_TOPIC.name(), KAFKA_RETAINED_MESSAGES_TOPIC_NAME); + } +} diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java index 132459d1c4..da151e3284 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java @@ -110,6 +110,46 @@ public void shouldAbortWhenServerSentData() throws Exception k3po.finish(); } + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/publish.retained.server.sent.abort/client", + "${kafka}/publish.retained.server.sent.abort/server"}) + public void shouldPublishRetainedThenReceiveServerSentAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/publish.retained.server.sent.flush/client", + 
"${kafka}/publish.retained.server.sent.flush/server"}) + public void shouldPublishRetainedThenReceiveServerSentFlush() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/publish.retained.server.sent.reset/client", + "${kafka}/publish.retained.server.sent.reset/server"}) + public void shouldPublishRetainedThenReceiveServerSentReset() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/publish.retained.server.sent.data/client", + "${kafka}/publish.retained.server.sent.data/server"}) + public void shouldPublishRetainedThenAbortWhenServerSentData() throws Exception + { + k3po.finish(); + } + @Test @Configuration("proxy.yaml") @Specification({ @@ -120,6 +160,16 @@ public void shouldSendOneMessage() throws Exception k3po.finish(); } + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/publish.retained/client", + "${kafka}/publish.retained/server"}) + public void shouldPublishRetainedMessage() throws Exception + { + k3po.finish(); + } + @Test @Configuration("proxy.yaml") @Specification({ diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java index bc646e6e76..9023847b73 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java @@ -15,6 +15,7 @@ package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_BUFFER_SLOT_CAPACITY; +import static 
io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -43,6 +44,7 @@ public class MqttKafkaSubscribeProxyIT .responseBufferCapacity(1024) .counterValuesBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) + .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/kafka/config") .external("kafka0") .clean(); @@ -110,6 +112,26 @@ public void shouldReceiveServerSentReset() throws Exception k3po.finish(); } + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/subscribe.retained.server.sent.abort/client", + "${kafka}/subscribe.retained.server.sent.abort/server"}) + public void shouldReceiveServerSentRetainedAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/subscribe.retained.server.sent.reset/client", + "${kafka}/subscribe.retained.server.sent.reset/server"}) + public void shouldReceiveServerSentRetainedReset() throws Exception + { + k3po.finish(); + } + @Test @Configuration("proxy.yaml") @Specification({ @@ -120,6 +142,76 @@ public void shouldReceiveOneMessage() throws Exception k3po.finish(); } + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/subscribe.multiple.message/client", + "${kafka}/subscribe.multiple.message/server"}) + public void shouldReceiveMultipleMessage() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/subscribe.retain.as.published/client", + "${kafka}/subscribe.retain/server"}) + public void shouldReceiveRetainAsPublished() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/subscribe.retain/client", + "${kafka}/subscribe.retain/server"}) + public void shouldReceiveRetainedNoRetainAsPublished() throws Exception + { + k3po.finish(); + } + + 
@Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/subscribe.filter.change.retain/client", + "${kafka}/subscribe.filter.change.retain/server"}) + public void shouldReceiveRetainedAfterFilterChange() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/subscribe.filter.change.retain/client", + "${kafka}/subscribe.filter.change.retain.buffer/server"}) + public void shouldReceiveRetainedAfterFilterChangeBufferMessages() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/subscribe.deferred.filter.change.retain/client", + "${kafka}/subscribe.deferred.filter.change.retain/server"}) + public void shouldReceiveRetainedAfterDeferredFilterChange() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/subscribe.filter.change.retain.resubscribe/client", + "${kafka}/subscribe.filter.change.retain.resubscribe/server"}) + public void shouldReceiveRetainedAfterResubscribe() throws Exception + { + k3po.finish(); + } + @Test @Configuration("proxy.yaml") @Specification({ diff --git a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java index 58807ebe47..e145bf64de 100644 --- a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java +++ b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java @@ -278,6 +278,16 @@ public MqttPublishBeginExBuilder topic( return this; } + public MqttPublishBeginExBuilder flags( + String... 
flagNames) + { + int flags = Arrays.stream(flagNames) + .mapToInt(flag -> 1 << MqttPublishFlags.valueOf(flag).ordinal()) + .reduce(0, (a, b) -> a | b); + publishBeginExRW.flags(flags); + return this; + } + public MqttBeginExBuilder build() { final MqttPublishBeginExFW publishBeginEx = publishBeginExRW.build(); @@ -437,13 +447,6 @@ private MqttPublishDataExBuilder() publishDataExRW.wrap(writeBuffer, MqttBeginExFW.FIELD_OFFSET_PUBLISH, writeBuffer.capacity()); } - public MqttPublishDataExBuilder topic( - String topic) - { - publishDataExRW.topic(topic); - return this; - } - public MqttPublishDataExBuilder qos( String qos) { @@ -807,6 +810,15 @@ public static final class MqttBeginExMatcherBuilder private Integer kind; private Predicate caseMatcher; + public MqttPublishBeginExMatcherBuilder publish() + { + final MqttPublishBeginExMatcherBuilder matcherBuilder = new MqttPublishBeginExMatcherBuilder(); + + this.kind = MqttExtensionKind.PUBLISH.value(); + this.caseMatcher = matcherBuilder::match; + return matcherBuilder; + } + public MqttSubscribeBeginExMatcherBuilder subscribe() { final MqttSubscribeBeginExMatcherBuilder matcherBuilder = new MqttSubscribeBeginExMatcherBuilder(); @@ -878,6 +890,73 @@ private boolean matchCase( return caseMatcher == null || caseMatcher.test(beginEx); } + public final class MqttPublishBeginExMatcherBuilder + { + private String16FW clientId; + private String16FW topic; + private Integer flags; + + private MqttPublishBeginExMatcherBuilder() + { + } + public MqttPublishBeginExMatcherBuilder clientId( + String clientId) + { + this.clientId = new String16FW(clientId); + return this; + } + + public MqttPublishBeginExMatcherBuilder topic( + String topic) + { + this.topic = new String16FW(topic); + return this; + } + + public MqttPublishBeginExMatcherBuilder flags( + String... 
flags) + { + this.flags = Arrays.stream(flags) + .mapToInt(flag -> 1 << MqttPublishFlags.valueOf(flag).ordinal()) + .reduce(0, (a, b) -> a | b); + return this; + } + + + public MqttBeginExMatcherBuilder build() + { + return MqttBeginExMatcherBuilder.this; + } + + private boolean match( + MqttBeginExFW beginEx) + { + final MqttPublishBeginExFW publishBeginEx = beginEx.publish(); + return matchClientId(publishBeginEx) && + matchTopic(publishBeginEx) && + matchFlags(publishBeginEx); + } + + private boolean matchClientId( + final MqttPublishBeginExFW publishBeginEx) + { + return clientId == null || clientId.equals(publishBeginEx.clientId()); + } + + + private boolean matchTopic( + final MqttPublishBeginExFW publishBeginEx) + { + return topic == null || topic.equals(publishBeginEx.topic()); + } + + private boolean matchFlags( + final MqttPublishBeginExFW publishBeginEx) + { + return flags == null || flags == publishBeginEx.flags(); + } + } + public final class MqttSubscribeBeginExMatcherBuilder { private String16FW clientId; @@ -1553,7 +1632,6 @@ public final class MqttPublishDataExMatcherBuilder { private MqttBinaryFW.Builder correlationRW; private final DirectBuffer correlationRO = new UnsafeBuffer(0, 0); - private String16FW topic; private Integer qos; private Integer flags; private Integer expiryInterval = -1; @@ -1566,13 +1644,6 @@ private MqttPublishDataExMatcherBuilder() { } - public MqttPublishDataExMatcherBuilder topic( - String topic) - { - this.topic = new String16FW(topic); - return this; - } - public MqttPublishDataExMatcherBuilder qos( String qos) { @@ -1666,8 +1737,7 @@ private boolean match( MqttDataExFW dataEx) { final MqttPublishDataExFW publishDataEx = dataEx.publish(); - return matchTopic(publishDataEx) && - matchQos(publishDataEx) && + return matchQos(publishDataEx) && matchFlags(publishDataEx) && matchExpiryInterval(publishDataEx) && matchContentType(publishDataEx) && @@ -1677,12 +1747,6 @@ private boolean match( matchUserProperties(publishDataEx); 
} - private boolean matchTopic( - final MqttPublishDataExFW data) - { - return topic == null || topic.equals(data.topic()); - } - private boolean matchQos( final MqttPublishDataExFW data) { diff --git a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl index a4718e0f40..c070158a1e 100644 --- a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl +++ b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl @@ -71,7 +71,7 @@ scope mqtt { uint32 subscriptionId = 0; uint8 qos = 0; - uint8 flags = 1; + uint8 flags = 0; string16 pattern; } @@ -129,6 +129,7 @@ scope mqtt { string16 clientId; string16 topic; + uint8 flags = 0; } union MqttDataEx switch (uint8) extends core::stream::Extension @@ -155,7 +156,6 @@ scope mqtt struct MqttPublishDataEx { int32 deferred = 0; // INIT only (TODO: move to DATA frame) - string16 topic = null; uint8 qos = 0; uint8 flags = 0; int32 expiryInterval = -1; diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/client.rpt new file mode 100644 index 0000000000..65aa4b1e76 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/client.rpt @@ -0,0 +1,23 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +connected + +write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/server.rpt new file mode 100644 index 0000000000..edb5fde46f --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/server.rpt @@ -0,0 +1,25 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +connected + +read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt new file mode 100644 index 0000000000..bc58168772 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt @@ -0,0 +1,71 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} +read "message" + +write close +read closed + +write notify SUBSCRIBE_CLOSED + +connect await SUBSCRIBE_CLOSED + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/two") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .build() + .build()} + +write "message" +write flush + +write close +read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt new file mode 100644 index 0000000000..cc4584ba3f --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt @@ -0,0 +1,70 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message" +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/two") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .publish() + .build() + .build()} + +read "message" + +read closed +write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt index 9218fc2e2b..c3373f30a9 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt @@ -31,7 +31,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") 
.build() .build()} write zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt index a6480e979d..3e2f7984b9 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt @@ -21,7 +21,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -34,6 +34,5 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt index 56622b6ab3..59a994fa9e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt @@ -23,6 +23,7 @@ write zilla:begin.ext ${mqtt:beginEx() .publish() .clientId("client") .topic("sensor/one") + .flags("RETAIN") .build() .build()} @@ -31,7 +32,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .flags("RETAIN") .build() .build()} diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt index a73248bd47..e5234d60ca 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt @@ -21,11 +21,12 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") .topic("sensor/one") + .flags("RETAIN") .build() .build()} @@ -34,7 +35,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .flags("RETAIN") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt index 8a47c697ab..150bd479f6 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt @@ -31,7 +31,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt index 97a141e137..7700300aff 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt @@ -21,7 +21,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -34,7 +34,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt index 55f81d6eb8..152afdd8b0 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt @@ -31,7 +31,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -40,7 +39,6 @@ write "message1" write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -63,7 +61,6 @@ connected write 
zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/two") .build() .build()} @@ -72,7 +69,6 @@ write "message2" write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/two") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt index e5908d22bf..1ee6c4cd19 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt @@ -21,7 +21,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -34,7 +34,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -43,7 +42,6 @@ read "message1" read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -51,7 +49,7 @@ read "message3" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -64,7 +62,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/two") .build() .build()} @@ -73,7 +70,6 @@ read "message2" read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/two") .qos("AT_MOST_ONCE") .build() .build()} diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt index 1c75847256..e4526a2244 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt @@ -31,7 +31,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -40,7 +39,6 @@ write "message1" write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt index 8684321d2f..c394626fd3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt @@ -21,7 +21,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -34,7 +34,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) 
.publish() - .topic("sensor/one") .build() .build()} @@ -43,7 +42,6 @@ read "message1" read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt index a5967e8832..ea48bab788 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt @@ -31,7 +31,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -40,7 +39,6 @@ write "message1" write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -49,7 +47,6 @@ write "message2" write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt index aefdff5a25..83562f1d64 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt @@ -21,7 +21,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -34,7 +34,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -43,7 +42,6 @@ read "message1" read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -52,7 +50,6 @@ read "message2" read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt index 0beb41c4a2..f401b1ea4c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt @@ -31,7 +31,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -54,7 +53,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/two") .build() .build()} @@ -63,7 +61,6 @@ write "message2" write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/two") .build() .build()} diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt index 984617ccd3..0ea4e6e20e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt @@ -21,7 +21,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -34,7 +34,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -42,7 +41,7 @@ read "message1" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -55,7 +54,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/two") .build() .build()} @@ -64,7 +62,6 @@ read "message2" read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/two") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt index af278c7e21..256071880c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt @@ -31,7 +31,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -40,7 +39,6 @@ write "message1" write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -49,7 +47,6 @@ write "test" write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt index 635bc856e7..79730b171b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt @@ -20,7 +20,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -33,7 +33,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -42,7 +41,6 @@ read "message1" read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} @@ -51,7 +49,6 @@ read "test" read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .build() .build()} diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt index bf770b1dd7..80e57ce0fb 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt @@ -31,7 +31,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .qos("AT_MOST_ONCE") .expiryInterval(15) .contentType("message") @@ -46,7 +45,6 @@ write "message" write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .qos("AT_MOST_ONCE") .expiryInterval(15) .contentType("message") diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt index 1e5af8308b..d9850e07ee 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt @@ -21,7 +21,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -34,7 +34,6 @@ connected read zilla:data.ext 
${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .qos("AT_MOST_ONCE") .expiryInterval(15) .contentType("message") @@ -49,7 +48,6 @@ read "message" read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .qos("AT_MOST_ONCE") .expiryInterval(15) .contentType("message") diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt index f650efd701..c202e5fb9d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt @@ -31,7 +31,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .qos("AT_MOST_ONCE") .expiryInterval(15) .contentType("message") diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt index d23c11c3cd..3bc19525ae 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt @@ -21,7 +21,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -34,7 +34,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() 
.typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .qos("AT_MOST_ONCE") .expiryInterval(15) .contentType("message") diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt index 4fc78dd93d..6862e3d7f4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt @@ -23,6 +23,7 @@ write zilla:begin.ext ${mqtt:beginEx() .publish() .clientId("client") .topic("sensor/one") + .flags("RETAIN") .build() .build()} @@ -31,7 +32,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .flags("RETAIN") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt index 74f758d9c5..2eca44d740 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt @@ -21,11 +21,12 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") .topic("sensor/one") + .flags("RETAIN") .build() .build()} @@ -34,7 +35,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") 
.flags("RETAIN") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt index be574df74d..52c7556678 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt @@ -30,7 +30,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("/sensors/1") .userProperty("row1", "1") .userProperty("row2", "2") .build() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt index cc503e4eec..5bbfea710f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt @@ -20,7 +20,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") @@ -32,7 +32,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("/sensors/1") .userProperty("row1", "1") .userProperty("row2", "2") .build() diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt index 4a45d83c7b..eca94553b4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt @@ -30,7 +30,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("/sensors/1") .userProperty("row1", "1") .userProperty("row1", "2") .build() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt index 5bf728ae4c..2912229271 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt @@ -20,7 +20,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") @@ -32,7 +32,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("/sensors/1") .userProperty("row1", "1") .userProperty("row1", "2") .build() diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt index f29fccc076..3eed4fccb5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt @@ -30,7 +30,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("/sensors/1") .userProperty("row", "1") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt index dad063c03d..22654efe10 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt @@ -20,7 +20,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") @@ -32,7 +32,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("/sensors/1") .userProperty("row", "1") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt index 7ed66fc3ea..75131094ab 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1) + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt index 79ac93bf98..a598da3468 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1) + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt index 2d946a1a43..0bcad34c48 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt index 016b74d411..1fd3cd63f3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL") .build() .build()} @@ -51,6 +51,7 @@ write zilla:begin.ext ${mqtt:beginEx() .publish() .clientId("client") .topic("sensor/one") + .flags("RETAIN") .build() .build()} @@ -59,7 +60,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .format("TEXT") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt index 7a05efdbe0..ac3e52e227 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client") - .filter("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL", "SEND_RETAINED") + .filter("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL") .build() .build()} @@ -43,11 +43,12 @@ write flush accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") .topic("sensor/one") + .flags("RETAIN") .build() .build()} @@ -56,7 +57,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .format("TEXT") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt index cbddf7285c..fb558b094f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt @@ -31,7 +31,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .flags("RETAIN") .build() .build()} diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt index 42f1dcd638..aefa13a75d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt @@ -21,7 +21,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client2") @@ -34,7 +34,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .flags("RETAIN") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt index 83ec82fc6d..582c34281c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt @@ -31,7 +31,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .flags("RETAIN") .build() .build()} @@ -49,7 +48,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() 
.clientId("client2") - .filter("sensor/one", 1) + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt index 2e5df58b86..097b64bef3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt @@ -21,7 +21,7 @@ accept "zilla://streams/app0" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client1") @@ -34,7 +34,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .flags("RETAIN") .build() .build()} @@ -48,7 +47,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .subscribe() .clientId("client2") - .filter("sensor/one", 1) + .filter("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt index 86006a1a7a..ae9fbea489 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt @@ -56,7 +56,6 @@ connected write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .format("TEXT") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt index f5f5b16fba..db5c3252fd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt @@ -42,7 +42,7 @@ write "message" accepted -read zilla:begin.ext ${mqtt:beginEx() +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() .clientId("client") @@ -55,7 +55,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/one") .format("TEXT") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt index 71aa582a5f..864949f7ea 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt @@ -55,7 +55,6 @@ connected write zilla:data.ext ${mqtt:dataEx() 
.typeId(zilla:id("mqtt")) .publish() - .topic("sensor/two") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt index 1c1780bb18..17a5f2539f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt @@ -53,7 +53,6 @@ connected read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() - .topic("sensor/two") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt index a615fc32af..58ca55aadf 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt @@ -41,7 +41,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt index 8c1619aedd..fbc06eccd0 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt @@ -42,7 +42,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt index 97d4de65a4..ccf49381b4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt @@ -34,7 +34,7 @@ write [0x10 0x13] # CONNECT [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x20 0x03] # CONNACK [0x00] # flags = none diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt index b2083f3c35..db105b99fc 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt @@ -34,7 +34,7 @@ read [0x10 0x13] # CONNECT [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x20 0x03] # CONNACK [0x00] # flags = none diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt new file mode 100644 index 0000000000..fc5499f3d1 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt @@ -0,0 +1,58 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +read [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties = none + +write [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +read [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +write [0x30 0x14] # PUBLISH + [0x00 0x0a] "sensor/two" # topic name + [0x00] # properties + "message" # payload + +write [0xe0 0x02] # DISCONNECT + [0x00] # normal disconnect + [0x00] # properties = none + + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt new file mode 100644 index 0000000000..ab8e3998a8 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt @@ -0,0 +1,58 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +write [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties = none + +read [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +read [0x30 0x14] # PUBLISH + [0x00 0x0a] "sensor/two" # topic name + [0x00] # properties + "message" # payload + +read [0xe0 0x02] # DISCONNECT + [0x00] # normal disconnect + [0x00] # properties = none + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt index bfcdb1d156..ee394def99 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt @@ -47,7 +47,7 @@ 
write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt index 93437368e9..273ea6dbbf 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt @@ -48,7 +48,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt index 214a953528..4030fe9abf 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt @@ -48,7 +48,7 @@ write [0x30 0x39] # PUBLISH [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once 
[0x30 0x3a] # PUBLISH [0x00 0x0a] "sensor/one" # topic name [0x25] # properties diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt index b91aec06c9..463fc62f5b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt @@ -49,7 +49,7 @@ read [0x30 0x39] # PUBLISH [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt index 4f3a33e626..40167198c0 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt index f7f7e80abd..e0e6e45a37 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt index 79b4e0b5f8..52ee3f7e37 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt index cd44145bc9..31bd2515d1 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt index 98523cb372..1028ffd030 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt index 5b74a727f1..7a36e1223c 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt index 84851d383e..1568918c03 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt index 58b8788cba..f346fdfe04 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt @@ -40,7 +40,7 @@ 
read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt index de97a2a227..30590722b8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt @@ -39,10 +39,10 @@ write [0x82 0x1f] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0xa2 0x0f] # UNSUBSCRIBE [0x00 0x02] # packet id = 2 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt index 9c7d67d158..682fdc2853 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt @@ -40,10 +40,10 @@ read [0x82 0x1f] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0xa2 0x0f] # UNSUBSCRIBE [0x00 0x02] # packet id = 2 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt index 75cec60eb5..9aa1320c06 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt index 5369662547..6275448c9e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt new file mode 100644 index 0000000000..e206de2a14 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt @@ -0,0 +1,59 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +read [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties = none + +write [0x82 0x10] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x00] # properties + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +read [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +write [0x30 0x16] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x02] # properties + [0x01 0x01] # format = utf-8 + "message" # payload + +read [0x30 0x18] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x04] # properties + [0x0b 0x01] # subscription id = 1 + [0x01 0x01] # format = utf-8 + "message" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt new file mode 100644 index 0000000000..693ca30365 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt @@ -0,0 +1,61 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +write [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties = none + +read [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +read [0x30 0x16] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x02] # properties + [0x01 0x01] # format = utf-8 + "message" # payload + +write [0x30 0x18] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x04] # properties + [0x0b 0x01] # subscription id = 1 + [0x01 0x01] # format = utf-8 + "message" # payload \ No newline at end of file diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt index 394ce449c2..cf10a9efac 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt index 2a00fd6d2d..89fdc8124f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt index bd5b56bd3e..21fc29335b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt index 1613f1c5c9..4eb5e8814a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt index 54d4a929ea..8113efb2c4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x00] # invalid subscriptionId = 0 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0xe0 0x02] # DISCONNECT = normal [0x82] # protocol error diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt index fd237bf7b4..04a5236644 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x00] # invalid subscriptionId = 0 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0xe0 0x02] # DISCONNECT = normal [0x82] # protocol error diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt index 88223c46a2..0b2b4fd884 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt index 1bec59b7a5..adce45abe2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt index e19108cb1e..388c9dba39 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x04] # options = at-most-once, no-local, send retained + [0x24] # options = at-most-once, no-local read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt index 739316e46a..25b8f9d763 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x04] # options = at-most-once, no-local, send retained + [0x24] # options = at-most-once, no-local write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt index d18dc1cf0d..cb8c25a052 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/+/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 @@ -51,7 +51,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x0a] "sensor/+/1" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt index 72fe7b721d..196f5130f1 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/+/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 @@ -52,7 +52,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x0a] "sensor/+/1" # topic filter - [0x00] # 
options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt index 0e3d83a2f3..df3f3f98d4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x10] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x08] "sensor/+" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt index a333bf4451..80e3f9db30 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x10] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x08] "sensor/+" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt index 5502124a84..18cade375b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt index 05cab126e9..693ca30365 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt index 762c45c1de..a04837c7ed 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x10] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x08] "sensor/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt index 71546b1de6..ab94ad4d69 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x10] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x08] "sensor/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt index f1deb49e87..bae0c2cdc0 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x14] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0c] "sensor/+/1/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt index d0c8845cf9..1c4a186cfd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x14] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0c] "sensor/+/1/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt index 27336b9784..6d9f1dcfe5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt index 81d4472629..d5ef889cf7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt index 22287a377a..b4f6fda4ab 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x10] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x08] "sensor/+" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt index d72aec3064..42bd2af797 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x10] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x08] "sensor/+" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt index 6ebcc04bef..cbd253e529 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/+/+" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt index 8cb126d8e6..c1d9da82e3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/+/+" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt index 1cb03ec6a7..0353af589c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt @@ -39,10 +39,10 @@ write [0x82 0x1f] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt index c752a811da..9589f5edd5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt @@ -40,10 +40,10 @@ read [0x82 0x1f] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x05] # SUBACK [0x00 0x01] # packet 
id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt index d4dfa5c059..5ff09fbca3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt @@ -39,10 +39,10 @@ write [0x82 0x1d] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x08] "device/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt index 49467e2d2d..381f20cc2a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt @@ -40,10 +40,10 @@ read [0x82 0x1d] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # 
options = at-most-once, send retained + [0x20] # options = at-most-once -[0x00 0x08] "device/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x00 0x08] "device/#" # topic filter + [0x20] # options = at-most-once write [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt index caacba76e9..7fd54dbcca 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt @@ -39,10 +39,10 @@ write [0x82 0x1b] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x08] "sensor/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x08] "device/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt index 92936e78f2..fd29563aaa 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt @@ -40,10 +40,10 @@ read [0x82 0x1b] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x08] "sensor/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once -[0x00 0x08] "device/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x00 0x08] "device/#" # topic filter + [0x20] # options = at-most-once write [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt index 60cbbd058f..a1a93748aa 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 @@ -51,7 +51,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt index b53d46ff87..ec1a3928af 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 @@ -52,7 +52,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt index 67911e11f7..d5a44c06fa 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x10] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x08] "sensor/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # 
SUBACK [0x00 0x01] # packet id = 1 @@ -51,7 +51,7 @@ write [0x82 0x10] # SUBSCRIBE [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x08] "device/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt index 37c9e42ca9..9cf15ef8bb 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x10] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x08] "sensor/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 @@ -52,7 +52,7 @@ read [0x82 0x10] # SUBSCRIBE [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x08] "device/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt index 2fefc2f6af..181b199d87 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 @@ -51,7 +51,7 @@ write [0x82 0x10] # SUBSCRIBE [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x08] "device/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt index 307ca61c67..7d197e098a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 @@ -52,7 +52,7 @@ read [0x82 0x10] # SUBSCRIBE [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x08] "device/#" # 
topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt index 713e5e3943..1312d627ea 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt @@ -39,10 +39,10 @@ write [0x82 0x1f] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/+/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x0a] "sensor/+/1" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt index 399d3c5e9b..152943186e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt @@ -40,10 +40,10 @@ read [0x82 0x1f] # SUBSCRIBE [0x02] # properties [0x0b 
0x01] # subscription id = 1 [0x00 0x0a] "sensor/+/#" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x0a] "sensor/+/1" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt index db8f57700e..e4c43817bd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt index 61f0e8c32b..e1fd92a605 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = 
at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt index 0dc3b7a5dd..f78f7c24ac 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt @@ -39,10 +39,10 @@ write [0x82 0x1f] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt index 333e6686d9..8c640277d7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt @@ -40,10 +40,10 @@ read [0x82 0x1f] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription 
id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt index 8726af2b96..beed4963a2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt @@ -38,7 +38,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt index dc53b38f52..a097586616 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt @@ -39,7 +39,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] 
"sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt index 6f262f1b0b..6d78cacf2d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt @@ -38,7 +38,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt index cb38265c2e..381c026006 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt @@ -39,7 +39,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 
0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt index 6bc2fb4be1..1f2e757fe2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt @@ -39,10 +39,10 @@ write [0x82 0x1f] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt index 1b2efb52bd..1627cbe50d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt @@ -40,10 +40,10 @@ read [0x82 0x1f] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send 
retained + [0x20] # options = at-most-once write [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt index dab4e2272c..d6832b017f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt index 38e3d3f3e5..6059ab3187 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 
0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt index ff12eac7d2..a1bd515cb4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt index b595bf3dba..2d3c972dac 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt index 38dd5dcf14..ab7a2a81c3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt @@ -39,7 +39,7 @@ write [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt index 390f7b216e..3918ccccbe 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt @@ -40,7 +40,7 @@ read [0x82 0x12] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt index 8347000999..ff26c0387b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt @@ -39,10 +39,10 @@ write [0x82 0x1f] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once read [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt index 670278c91a..e204f07d33 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt @@ -40,10 +40,10 @@ read [0x82 0x1f] # SUBSCRIBE [0x02] # properties [0x0b 0x01] # subscription id = 1 [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once [0x00 0x0a] "sensor/two" # topic filter - [0x00] # options = at-most-once, send retained + [0x20] # options = at-most-once write [0x90 0x05] # SUBACK [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java index ea3eca34a1..bedfb1c6af 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java @@ -219,6 +219,31 @@ public void shouldEncodeMqttSubscribeBeginExtWithFlags() 0b0001 == f.flags())); } + @Test + public void shouldMatchPublishBeginExtension() throws Exception + { + BytesMatcher matcher = MqttFunctions.matchBeginEx() + .publish() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() + .build(); + + ByteBuffer byteBuf = ByteBuffer.allocate(1024); + + new MqttBeginExFW.Builder() + .wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) + .typeId(0x01) + .publish(f -> f + .clientId("client") + .topic("sensor/one") + .flags(1)) + .build(); + + assertNotNull(matcher.match(byteBuf)); + } + @Test public void shouldMatchSubscribeBeginExtension() throws Exception { @@ -259,7 +284,7 @@ public void shouldMatchSubscribeBeginExtensionDefaults() throws Exception .typeId(0x01) .subscribe(f -> f .clientId("client") - .filtersItem(p -> p.subscriptionId(1).qos(0).flags(1).pattern("sensor/one"))) + .filtersItem(p -> p.subscriptionId(1).qos(0).flags(0).pattern("sensor/one"))) .build(); assertNotNull(matcher.match(byteBuf)); @@ -282,7 +307,7 @@ public void shouldMatchSubscribeBeginExtensionNoSubscriptionId() throws Exceptio .typeId(0x01) .subscribe(f -> f .clientId("client") - .filtersItem(p -> p.qos(0).flags(1).pattern("sensor/one"))) + .filtersItem(p -> p.qos(0).flags(0).pattern("sensor/one"))) .build(); assertNotNull(matcher.match(byteBuf)); @@ -410,14 +435,15 @@ public void shouldMatchSessionBeginExtensionWithBytes() throws Exception } @Test - public void 
shouldEncodeMqttProduceBeginEx() + public void shouldEncodeMqttPublishBeginEx() { final byte[] array = MqttFunctions.beginEx() .typeId(0) .publish() - .clientId("client") - .topic("sensor/one") - .build() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() .build(); DirectBuffer buffer = new UnsafeBuffer(array); @@ -426,6 +452,7 @@ public void shouldEncodeMqttProduceBeginEx() assertEquals(0, mqttBeginEx.kind()); assertEquals("client", mqttBeginEx.publish().clientId().asString()); assertEquals("sensor/one", mqttBeginEx.publish().topic().asString()); + assertEquals(1, mqttBeginEx.publish().flags()); } @Test @@ -829,7 +856,6 @@ public void shouldMatchPublishDataExtension() throws Exception { BytesMatcher matcher = MqttFunctions.matchDataEx() .publish() - .topic("sensor/one") .qos("AT_MOST_ONCE") .flags("RETAIN") .expiryInterval(20) @@ -848,7 +874,6 @@ public void shouldMatchPublishDataExtension() throws Exception .typeId(0x00) .publish(p -> { - p.topic("sensor/one"); p.qos(0); p.flags(1); p.expiryInterval(20); @@ -868,7 +893,6 @@ public void shouldMatchPublishDataExtensionWithBytes() throws Exception { BytesMatcher matcher = MqttFunctions.matchDataEx() .publish() - .topic("sensor/one") .qos("AT_MOST_ONCE") .expiryInterval(20) .contentType("message") @@ -886,7 +910,6 @@ public void shouldMatchPublishDataExtensionWithBytes() throws Exception .typeId(0x00) .publish(p -> { - p.topic("sensor/one"); p.qos(0); p.expiryInterval(20); p.contentType("message"); @@ -905,7 +928,6 @@ public void shouldMatchPublishDataExtensionWithEmptyFields() throws Exception { BytesMatcher matcher = MqttFunctions.matchDataEx() .publish() - .topic("sensor/one") .qos("AT_MOST_ONCE") .build() .build(); @@ -917,7 +939,6 @@ public void shouldMatchPublishDataExtensionWithEmptyFields() throws Exception .typeId(0x00) .publish(p -> { - p.topic("sensor/one"); p.flags(0); }) .build(); @@ -931,7 +952,6 @@ public void shouldEncodeMqttPublishDataEx() final byte[] array = 
MqttFunctions.dataEx() .typeId(0) .publish() - .topic("sensor/one") .expiryInterval(15) .contentType("message") .format("TEXT") @@ -945,7 +965,6 @@ public void shouldEncodeMqttPublishDataEx() MqttDataExFW mqttPublishDataEx = new MqttDataExFW().wrap(buffer, 0, buffer.capacity()); assertEquals(0, mqttPublishDataEx.typeId()); - assertEquals("sensor/one", mqttPublishDataEx.publish().topic().asString()); assertEquals(15, mqttPublishDataEx.publish().expiryInterval()); assertEquals("message", mqttPublishDataEx.publish().contentType().asString()); assertEquals("TEXT", mqttPublishDataEx.publish().format().toString()); @@ -964,7 +983,6 @@ public void shouldEncodeMqttPublishDataExWithUserProperty() final byte[] array = MqttFunctions.dataEx() .typeId(0) .publish() - .topic("sensor/one") .userProperty("name", "value") .build() .build(); @@ -973,7 +991,6 @@ public void shouldEncodeMqttPublishDataExWithUserProperty() MqttDataExFW mqttDataEx = new MqttDataExFW().wrap(buffer, 0, buffer.capacity()); assertEquals(0, mqttDataEx.typeId()); - assertEquals("sensor/one", mqttDataEx.publish().topic().asString()); assertNotNull(mqttDataEx.publish().properties() .matchFirst(h -> "name".equals(h.key().asString()) && @@ -986,7 +1003,6 @@ public void shouldEncodeMqttPublishDataExWithFlags() final byte[] array = MqttFunctions.dataEx() .typeId(0) .publish() - .topic("sensor/one") .qos("EXACTLY_ONCE") .flags("RETAIN") .build() @@ -996,7 +1012,6 @@ public void shouldEncodeMqttPublishDataExWithFlags() MqttDataExFW mqttDataEx = new MqttDataExFW().wrap(buffer, 0, buffer.capacity()); assertEquals(0, mqttDataEx.typeId()); - assertEquals("sensor/one", mqttDataEx.publish().topic().asString()); assertEquals(2, mqttDataEx.publish().qos()); assertEquals(0b0001, mqttDataEx.publish().flags()); } @@ -1027,7 +1042,6 @@ public void shouldEncodeMqttPublishDataExWithUserProperties() final byte[] array = MqttFunctions.dataEx() .typeId(0) .publish() - .topic("sensor/one") .userProperty("name1", "value1") 
.userProperty("name2", "value2") .build() @@ -1037,7 +1051,6 @@ public void shouldEncodeMqttPublishDataExWithUserProperties() MqttDataExFW mqttDataEx = new MqttDataExFW().wrap(buffer, 0, buffer.capacity()); assertEquals(0, mqttDataEx.typeId()); - assertEquals("sensor/one", mqttDataEx.publish().topic().asString()); assertNotNull(mqttDataEx.publish().properties() .matchFirst(h -> "name1".equals(h.key().asString()) && @@ -1067,7 +1080,6 @@ public void shouldEncodeMqttPublishDataExWithoutTopic() MqttDataExFW mqttDataEx = new MqttDataExFW().wrap(buffer, 0, buffer.capacity()); assertEquals(0, mqttDataEx.typeId()); - assertNull(mqttDataEx.publish().topic().asString()); assertEquals(15, mqttDataEx.publish().expiryInterval()); assertEquals("message", mqttDataEx.publish().contentType().asString()); assertEquals("TEXT", mqttDataEx.publish().format().toString()); @@ -1086,7 +1098,6 @@ public void shouldEncodeMqttPublishDataExWithoutResponseTopic() final byte[] array = MqttFunctions.dataEx() .typeId(0) .publish() - .topic("sensor/one") .expiryInterval(15) .contentType("message") .format("TEXT") @@ -1099,7 +1110,6 @@ public void shouldEncodeMqttPublishDataExWithoutResponseTopic() MqttDataExFW mqttDataEx = new MqttDataExFW().wrap(buffer, 0, buffer.capacity()); assertEquals(0, mqttDataEx.typeId()); - assertEquals("sensor/one", mqttDataEx.publish().topic().asString()); assertEquals(15, mqttDataEx.publish().expiryInterval()); assertEquals("message", mqttDataEx.publish().contentType().asString()); assertEquals("TEXT", mqttDataEx.publish().format().toString()); @@ -1129,7 +1139,6 @@ public void shouldEncodeMqttPublishDataExWithNullDefaults() MqttDataExFW mqttDataEx = new MqttDataExFW().wrap(buffer, 0, buffer.capacity()); assertEquals(0, mqttDataEx.typeId()); - assertNull(mqttDataEx.publish().topic().asString()); assertEquals(15, mqttDataEx.publish().expiryInterval()); assertNull(mqttDataEx.publish().contentType().asString()); assertEquals("TEXT", 
mqttDataEx.publish().format().toString()); @@ -1148,7 +1157,6 @@ public void shouldEncodeMqttPublishDataExWithBytes() final byte[] array = MqttFunctions.dataEx() .typeId(0) .publish() - .topic("sensor/one") .expiryInterval(15) .contentType("message") .format("TEXT") @@ -1162,7 +1170,6 @@ public void shouldEncodeMqttPublishDataExWithBytes() MqttDataExFW mqttDataEx = new MqttDataExFW().wrap(buffer, 0, buffer.capacity()); assertEquals(0, mqttDataEx.typeId()); - assertEquals("sensor/one", mqttDataEx.publish().topic().asString()); assertEquals(15, mqttDataEx.publish().expiryInterval()); assertEquals("message", mqttDataEx.publish().contentType().asString()); assertEquals("TEXT", mqttDataEx.publish().format().toString()); @@ -1181,7 +1188,6 @@ public void shouldEncodeMqttPublishDataExWithNullUserPropertyValue() final byte[] array = MqttFunctions.dataEx() .typeId(0) .publish() - .topic("sensor/one") .expiryInterval(15) .contentType("message") .format("TEXT") @@ -1195,7 +1201,6 @@ public void shouldEncodeMqttPublishDataExWithNullUserPropertyValue() MqttDataExFW mqttDataEx = new MqttDataExFW().wrap(buffer, 0, buffer.capacity()); assertEquals(0, mqttDataEx.typeId()); - assertEquals("sensor/one", mqttDataEx.publish().topic().asString()); assertEquals(15, mqttDataEx.publish().expiryInterval()); assertEquals("message", mqttDataEx.publish().contentType().asString()); assertEquals("TEXT", mqttDataEx.publish().format().toString()); @@ -1268,6 +1273,6 @@ public void shouldEncodeMqttSessionState() .matchFirst(f -> "sensor/two".equals(f.pattern().asString()) && 0 == f.qos() && - 0b0001 == f.flags())); + 0b0000 == f.flags())); } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java index 34ac95860e..915c4b0e31 100644 --- 
a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java @@ -45,6 +45,15 @@ public void shouldReceiveClientSentAbort() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/client.sent.close/client", + "${app}/client.sent.close/server"}) + public void shouldReceiveClientSentClose() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${app}/connect.max.packet.size.exceeded/client", @@ -54,4 +63,13 @@ public void shouldNotReceivePublishPacketExceedingMaxPacketLimit() throws Except k3po.finish(); } + @Test + @Specification({ + "${app}/disconnect.after.subscribe.and.publish/client", + "${app}/disconnect.after.subscribe.and.publish/server"}) + public void shouldDisconnectAfterSubscribeAndPublish() throws Exception + { + k3po.finish(); + } + } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java index fb7ba4b644..4f493f24b9 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java @@ -140,7 +140,7 @@ public void shouldSendMessagesWithTopicAliasInvalidScope() throws Exception @Specification({ "${app}/publish.retained/client", "${app}/publish.retained/server"}) - public void shouldSendRetainedMessage() throws Exception + public void shouldPublishRetainedMessage() throws Exception { k3po.finish(); } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java 
b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java index 4dcacde78a..ab367fe9a8 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java @@ -373,4 +373,13 @@ public void shouldConnectAndSubscribeUnfragmented() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${net}/disconnect.after.subscribe.and.publish/client", + "${net}/disconnect.after.subscribe.and.publish/server"}) + public void shouldDisconnectAfterSubscribeAndPublish() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java index ce7c54e095..010f0bcde3 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java @@ -260,7 +260,7 @@ public void shouldSendMessagesWithTopicAliasInvalidScope() throws Exception @Specification({ "${net}/publish.retained/client", "${net}/publish.retained/server"}) - public void shouldSendRetainedMessage() throws Exception + public void shouldPublishRetainedMessage() throws Exception { k3po.finish(); } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 23a2c254ea..11f76362a3 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ 
b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -92,9 +92,11 @@ import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; +import org.agrona.collections.Int2IntHashMap; import org.agrona.collections.Int2ObjectHashMap; import org.agrona.collections.Long2ObjectHashMap; import org.agrona.collections.MutableBoolean; +import org.agrona.collections.Object2IntHashMap; import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.binding.mqtt.internal.MqttBinding; @@ -1201,8 +1203,8 @@ private final class MqttServer private final Int2ObjectHashMap publishStreams; private final Int2ObjectHashMap subscribeStreams; private final Int2ObjectHashMap topicAliases; - private final Map subscribePacketIds; - private final Map unsubscribePacketIds; + private final Int2IntHashMap subscribePacketIds; + private final Object2IntHashMap unsubscribePacketIds; private MqttSessionStream sessionStream; @@ -1272,8 +1274,8 @@ private MqttServer( this.publishStreams = new Int2ObjectHashMap<>(); this.subscribeStreams = new Int2ObjectHashMap<>(); this.topicAliases = new Int2ObjectHashMap<>(); - this.subscribePacketIds = new HashMap<>(); - this.unsubscribePacketIds = new HashMap<>(); + this.subscribePacketIds = new Int2IntHashMap(-1); + this.unsubscribePacketIds = new Object2IntHashMap<>(-1); } private void onNetwork( @@ -1791,7 +1793,6 @@ private void onDecodePublish( .typeId(mqttTypeId) .publish(publishBuilder -> { - publishBuilder.topic(mqttPublishHeaderRO.topic); publishBuilder.qos(mqttPublishHeaderRO.qos); publishBuilder.flags(mqttPublishHeaderRO.flags); publishBuilder.expiryInterval(mqttPublishHeaderRO.expiryInterval); @@ -1854,8 +1855,6 @@ private void onDecodeSubscribe( } else { - subscribePacketIds.put(subscriptionId, packetId); - final List newSubscriptions = new ArrayList<>(); for (int decodeProgress = decodeOffset; decodeProgress < decodeLimit; ) @@ -1918,6 +1917,8 @@ private void 
onDecodeSubscribe( subscription.id = subscriptionId; subscription.filter = filter; subscription.flags = flags; + //TODO: what if we don't have a subscriptionId + subscribePacketIds.put(subscriptionId, packetId); newSubscriptions.add(subscription); } @@ -2149,8 +2150,6 @@ private void onDecodeDisconnect( MqttDisconnectFW disconnect) { state = MqttState.closingInitial(state); - publishStreams.values().forEach(s -> s.doPublishNetEnd(traceId, authorization, EMPTY_OCTETS)); - subscribeStreams.values().forEach(s -> s.doSubscribeNetEnd(traceId, authorization)); if (session) { final MqttEndExFW.Builder builder = mqttEndExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) @@ -2161,6 +2160,7 @@ private void onDecodeDisconnect( MqttEndReasonCode.DISCONNECT)); sessionStream.doSessionAppEnd(traceId, authorization, builder.build()); } + closeStreams(traceId, authorization); doNetworkEnd(traceId, authorization); } @@ -2172,7 +2172,7 @@ private void onDecodeError( switch (reasonCode) { case SESSION_TAKEN_OVER: - cleanupStreamsUsingEnd(traceId, authorization); + closeStreams(traceId, authorization); break; default: cleanupStreamsUsingAbort(traceId, authorization); @@ -2759,12 +2759,12 @@ private void cleanupStreamsUsingAbort( } } - private void cleanupStreamsUsingEnd( + private void closeStreams( long traceId, long authorization) { - publishStreams.values().forEach(s -> s.cleanupEnd(traceId, authorization)); - subscribeStreams.values().forEach(s -> s.cleanupEnd(traceId, authorization)); + publishStreams.values().forEach(s -> s.doPublishAppEnd(traceId, authorization)); + subscribeStreams.values().forEach(s -> s.doSubscribeAppEnd(traceId, authorization)); if (sessionStream != null) { sessionStream.cleanupEnd(traceId, authorization); @@ -3264,24 +3264,9 @@ private void cleanupEnd( long traceId, long authorization) { - doSessionNetEnd(traceId, authorization, EMPTY_OCTETS); doSessionAppEnd(traceId, authorization, EMPTY_OCTETS); } - private void doSessionNetEnd( - long traceId, - 
long authorization, - Flyweight extension) - { - if (MqttState.initialOpening(state) && !MqttState.initialClosed(state)) - { - setInitialClosed(); - - doEnd(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, extension); - } - } - private void doSessionAbort( long traceId, long authorization) @@ -3443,6 +3428,7 @@ private void doPublishBegin( { publishBuilder.clientId(clientId); publishBuilder.topic(topic); + publishBuilder.flags(retainedMessages); }) .build(); @@ -3491,21 +3477,6 @@ private void doPublishAbort( } } - private void doPublishNetEnd( - long traceId, - long authorization, - Flyweight extension) - { - if (MqttState.initialOpening(state) && !MqttState.initialClosed(state)) - { - setPublishNetClosed(); - publishStreams.remove(topicKey); - - doEnd(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, extension); - } - } - private void onPublish( int msgTypeId, DirectBuffer buffer, @@ -3635,10 +3606,9 @@ private void onPublishWindow( debitorIndex = debitor.acquire(budgetId, initialId, MqttServer.this::decodeNetwork); } - if (MqttState.initialClosing(state) && - !MqttState.initialClosed(state)) + if (MqttState.initialClosing(state)) { - doPublishNetEnd(traceId, authorization, EMPTY_OCTETS); + doPublishAppEnd(traceId, authorization); } else if (decodePublisherKey == topicKey) { @@ -3688,7 +3658,7 @@ private void onPublishExpiredSignal( final long now = System.currentTimeMillis(); if (now >= publishExpiresAt) { - doPublishNetEnd(traceId, authorization, EMPTY_OCTETS); + doPublishAppEnd(traceId, authorization); } else { @@ -3759,10 +3729,10 @@ private void doPublishAppEnd( long traceId, long authorization) { - if (!MqttState.replyClosed(state)) + if (!MqttState.initialClosed(state)) { - setPublishAppClosed(); - doEnd(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + doCancelPublishExpiration(); + doEnd(application, originId, routedId, 
initialId, initialSeq, initialAck, initialMax, traceId, authorization, EMPTY_OCTETS); } } @@ -3806,15 +3776,6 @@ private void cleanupAbort( doPublishReset(traceId, authorization); doCancelPublishExpiration(); } - - private void cleanupEnd( - long traceId, - long authorization) - { - doPublishNetEnd(traceId, authorization, EMPTY_OCTETS); - doPublishAppEnd(traceId, authorization); - doCancelPublishExpiration(); - } } private class MqttSubscribeStream @@ -3966,7 +3927,7 @@ private void doSubscribeFlushOrEnd( { if (subscriptions.isEmpty()) { - doSubscribeNetEnd(traceId, authorization); + doSubscribeAppEnd(traceId, authorization); } else { @@ -3975,20 +3936,6 @@ private void doSubscribeFlushOrEnd( } } - private void doSubscribeNetEnd( - long traceId, - long authorization) - { - if (MqttState.initialOpening(state) && !MqttState.initialClosed(state)) - { - setNetClosed(); - subscribeStreams.remove(clientKey); - - doEnd(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, EMPTY_OCTETS); - } - } - private void doSubscribeAbort( long traceId, long authorization) @@ -4155,7 +4102,10 @@ private void onSubscribeWindow( subscriptionPayload[i] = SUCCESS; } - doEncodeSuback(traceId, authorization, packetId, subscriptionPayload); + if (!MqttState.initialOpened(state)) + { + doEncodeSuback(traceId, authorization, packetId, subscriptionPayload); + } if (session && !sessionStream.deferredUnsubscribes.isEmpty()) { Iterator>> iterator = @@ -4236,14 +4186,6 @@ private void cleanupAbort( doSubscribeReset(traceId, authorization); } - private void cleanupEnd( - long traceId, - long authorization) - { - doSubscribeNetEnd(traceId, authorization); - doSubscribeAppEnd(traceId, authorization); - } - private void doSubscribeWindow( long traceId, @@ -4289,9 +4231,6 @@ private void doSubscribeAppEnd( { if (MqttState.initialOpening(state) && !MqttState.initialClosed(state)) { - setNetClosed(); - subscribeStreams.remove(clientKey); - 
doEnd(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization, EMPTY_OCTETS); } diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java index 1abac32291..7c671c7208 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java @@ -119,6 +119,22 @@ public void shouldConnectThenDisconnect() throws Exception k3po.finish(); } + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/disconnect.after.subscribe.and.publish/client", + "${app}/disconnect.after.subscribe.and.publish/server"}) + @Configure(name = SESSION_AVAILABLE_NAME, value = "false") + @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") + public void shouldDisconnectAfterSubscribeAndPublish() throws Exception + { + k3po.finish(); + } + + @Test @Configuration("server.yaml") @Specification({ diff --git a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java index f857dd303d..a7ed9d6097 100644 --- a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java +++ b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java @@ -1334,7 +1334,6 @@ private void onMqttPublishDataEx( MqttPublishDataExFW publish) { final int deferred = publish.deferred(); - final String 
topic = publish.topic().asString(); final int flags = publish.flags(); final int expiryInterval = publish.expiryInterval(); final String contentType = publish.contentType().asString(); @@ -1344,8 +1343,8 @@ private void onMqttPublishDataEx( final Array32FW properties = publish.properties(); out.printf(verboseFormat, index, offset, timestamp, - format("[publish] (%d) %s %d %d %s %s %s %s", - deferred, topic, flags, expiryInterval, contentType, format.name(), responseTopic, correlation)); + format("[publish] (%d) %d %d %s %s %s %s", + deferred, flags, expiryInterval, contentType, format.name(), responseTopic, correlation)); properties.forEach(u -> out.printf(verboseFormat, index, offset, timestamp, format("%s %s ", u.key(), u.value()))); } From cd27baea383d82f386ad52e21a188f911c2921a3 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Fri, 21 Jul 2023 04:08:32 +0200 Subject: [PATCH 002/115] Redirect on mqtt reset using server reference (#303) --- .../binding/mqtt/internal/MqttFunctions.java | 63 +++++++++++++++++- .../main/resources/META-INF/zilla/mqtt.idl | 6 ++ .../client.rpt | 38 +++++++++++ .../server.rpt | 41 ++++++++++++ .../client.rpt | 36 ++++++++++ .../server.rpt | 38 +++++++++++ .../client.rpt | 42 ++++++++++++ .../server.rpt | 41 ++++++++++++ .../client.rpt | 36 ++++++++++ .../server.rpt | 37 +++++++++++ .../mqtt/internal/MqttFunctionsTest.java | 19 ++++++ .../mqtt/streams/application/SessionIT.java | 18 +++++ .../mqtt/streams/network/SessionIT.java | 18 +++++ .../mqtt/internal/MqttConfiguration.java | 7 ++ .../mqtt/internal/MqttReasonCodes.java | 1 + .../internal/stream/MqttServerFactory.java | 65 +++++++++++++++---- .../mqtt/internal/MqttConfigurationTest.java | 3 + .../mqtt/internal/stream/SessionIT.java | 31 +++++++++ 18 files changed, 528 insertions(+), 12 deletions(-) create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt create mode 100644 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java index e145bf64de..221e865081 100644 --- a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java +++ b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java @@ -49,6 +49,7 @@ import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttFlushExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttPublishBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttPublishDataExFW; +import 
io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttResetExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSessionBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSubscribeBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSubscribeDataExFW; @@ -104,6 +105,12 @@ public static MqttEndExBuilder endEx() return new MqttEndExBuilder(); } + @Function + public static MqttResetExBuilder resetEx() + { + return new MqttResetExBuilder(); + } + @Function public static MqttSessionStateBuilder session() { @@ -183,6 +190,13 @@ public MqttSessionBeginExBuilder expiry( return this; } + public MqttSessionBeginExBuilder serverReference( + String serverReference) + { + sessionBeginExRW.serverReference(serverReference); + return this; + } + public MqttWillMessageBuilder will() { return new MqttWillMessageBuilder(this); @@ -631,6 +645,39 @@ public byte[] build() } } + public static final class MqttResetExBuilder + { + private final MqttResetExFW.Builder resetExRW; + + private MqttResetExBuilder() + { + MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); + this.resetExRW = new MqttResetExFW.Builder().wrap(writeBuffer, 0, writeBuffer.capacity()); + } + + public MqttResetExBuilder typeId( + int typeId) + { + resetExRW.typeId(typeId); + return this; + } + + public MqttResetExBuilder serverReference( + String serverReference) + { + resetExRW.serverReference(serverReference); + return this; + } + + public byte[] build() + { + final MqttResetExFW resetEx = resetExRW.build(); + final byte[] array = new byte[resetEx.sizeof()]; + resetEx.buffer().getBytes(resetEx.offset(), array); + return array; + } + } + public static final class MqttSessionStateBuilder { private final MqttSessionStateFW.Builder sessionStateRW = new MqttSessionStateFW.Builder(); @@ -1053,7 +1100,7 @@ private boolean matchFilters( public final class MqttSessionBeginExMatcherBuilder { private String16FW clientId; 
- + private String16FW serverReference; private Integer expiry; private MqttWillMessageMatcherBuilder willMessageMatcher; @@ -1075,6 +1122,13 @@ public MqttSessionBeginExMatcherBuilder expiry( return this; } + public MqttSessionBeginExMatcherBuilder serverReference( + String serverReference) + { + this.serverReference = new String16FW(serverReference); + return this; + } + public MqttWillMessageMatcherBuilder will() { this.willMessageMatcher = new MqttWillMessageMatcherBuilder(); @@ -1093,6 +1147,7 @@ private boolean match( final MqttMessageFW willMessage = beginEx.session().will(); return matchClientId(sessionBeginEx) && matchExpiry(sessionBeginEx) && + matchServerReference(sessionBeginEx) && (willMessageMatcher == null || willMessageMatcher.match(willMessage)); } @@ -1108,6 +1163,12 @@ private boolean matchExpiry( return expiry == null || expiry == sessionBeginEx.expiry(); } + private boolean matchServerReference( + final MqttSessionBeginExFW sessionBeginEx) + { + return serverReference == null || serverReference.equals(sessionBeginEx.serverReference()); + } + public final class MqttWillMessageMatcherBuilder { private MqttBinaryFW.Builder correlationRW; diff --git a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl index c070158a1e..542414acc6 100644 --- a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl +++ b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl @@ -116,6 +116,7 @@ scope mqtt { string16 clientId; int32 expiry = 0; + string16 serverReference = null; MqttMessage will; } @@ -166,6 +167,11 @@ scope mqtt MqttUserProperty[] properties; } + struct MqttResetEx extends core::stream::Extension + { + string16 serverReference = null; + } + struct MqttEndEx extends core::stream::Extension { MqttEndReasonCode reasonCode = DISCONNECT; diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt new file mode 100644 index 0000000000..e71aa48366 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt @@ -0,0 +1,38 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client") + .serverReference("localhost:1883") + .build() + .build()} + +connected + +read zilla:data.empty + +read zilla:reset.ext ${mqtt:resetEx() + .typeId(zilla:id("mqtt")) + .serverReference("localhost:1884") + .build()} + +write aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt new file mode 100644 index 0000000000..d74bfdc289 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt @@ -0,0 +1,41 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client") + .serverReference("localhost:1883") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +write zilla:reset.ext ${mqtt:resetEx() + .typeId(zilla:id("mqtt")) + .serverReference("localhost:1884") + .build()} + +read abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt new file mode 100644 index 0000000000..9b849f7b88 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt @@ -0,0 +1,36 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client") + .serverReference("localhost:1883") + .build() + .build()} + +connected + +read zilla:reset.ext ${mqtt:resetEx() + .typeId(zilla:id("mqtt")) + .serverReference("localhost:1884") + .build()} + +write aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt new file mode 100644 index 0000000000..c80cf2a278 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt @@ -0,0 +1,38 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client") + .serverReference("localhost:1883") + .build() + .build()} + +connected + +write zilla:reset.ext ${mqtt:resetEx() + .typeId(zilla:id("mqtt")) + .serverReference("localhost:1884") + .build()} + +read abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt new file mode 100644 index 0000000000..286b7882be --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt @@ -0,0 +1,42 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +read [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties + +read [0xe0 0x13] # DISCONNECT + [0x9d] # reason code = Use another server + [0x11] # properties + [0x1c 0x00 0x0e] "localhost:1884" # server reference + + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt new file mode 100644 index 0000000000..73a845c997 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt @@ -0,0 +1,41 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +write [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties + +write [0xe0 0x13] # DISCONNECT + [0x9d] # reason code = Use another server + [0x11] # properties + [0x1c 0x00 0x0e] "localhost:1884" # server reference diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt new file mode 100644 index 0000000000..caddb654bb --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt @@ -0,0 +1,36 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +read [0x20 0x14] # CONNACK + [0x00] # flags = none + [0x9d] # reason code = Use another server + [0x11] # properties + [0x1c 0x00 0x0e] "localhost:1884" # server reference diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt new file mode 100644 index 0000000000..1a77fee573 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt @@ -0,0 +1,37 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +write [0x20 0x14] # CONNACK + [0x00] # flags = none + [0x9d] # reason code = Use another server + [0x11] # properties + [0x1c 0x00 0x0e] "localhost:1884" # server reference diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java index bedfb1c6af..6305e0ec73 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java @@ -36,6 +36,7 @@ import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttDataExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttEndExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttFlushExFW; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttResetExFW; public class MqttFunctionsTest { @@ -62,6 +63,7 @@ public void shouldEncodeMqttSessionBeginExt() .session() .clientId("client") .expiry(30) + .serverReference("localhost:1883") .will() .topic("will.client") .delay(20) @@ -81,6 +83,7 @@ public void shouldEncodeMqttSessionBeginExt() assertEquals(2, mqttBeginEx.kind()); assertEquals("client", mqttBeginEx.session().clientId().asString()); + assertEquals("localhost:1883", mqttBeginEx.session().serverReference().asString()); assertEquals(30, mqttBeginEx.session().expiry()); assertEquals("will.client", 
mqttBeginEx.session().will().topic().asString()); assertEquals(20, mqttBeginEx.session().will().delay()); @@ -320,6 +323,7 @@ public void shouldMatchSessionBeginExtension() throws Exception .session() .clientId("client") .expiry(10) + .serverReference("localhost:1883") .will() .topic("willTopic") .delay(10) @@ -344,6 +348,7 @@ public void shouldMatchSessionBeginExtension() throws Exception .session(s -> s .clientId("client") .expiry(10) + .serverReference("localhost:1883") .will(c -> { c.topic("willTopic"); @@ -1249,6 +1254,20 @@ public void shouldEncodeMqttAbortExAsUnsubscribe() assertEquals(MqttEndReasonCode.KEEP_ALIVE_EXPIRY, mqttEndEx.reasonCode().get()); } + @Test + public void shouldEncodeMqttResetEx() + { + final byte[] array = MqttFunctions.resetEx() + .typeId(0) + .serverReference("localhost:1883") + .build(); + + DirectBuffer buffer = new UnsafeBuffer(array); + MqttResetExFW mqttResetEx = new MqttResetExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0, mqttResetEx.typeId()); + assertEquals("localhost:1883", mqttResetEx.serverReference().asString()); + } + @Test public void shouldEncodeMqttSessionState() { diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java index 51f6e7030b..88446388e7 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java @@ -154,4 +154,22 @@ public void shouldUnsubscribeAfterSubscribeDeferred() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${app}/session.server.redirect.before.connack/client", + "${app}/session.server.redirect.before.connack/server"}) + public void shouldRedirectBeforeConnack() throws Exception + { + k3po.finish(); 
+ } + + @Test + @Specification({ + "${app}/session.server.redirect.after.connack/client", + "${app}/session.server.redirect.after.connack/server"}) + public void shouldRedirectAfterConnack() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java index 8f515004df..8f95ded58b 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java @@ -146,4 +146,22 @@ public void shouldUnsubscribeAfterSubscribeDeferred() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${net}/session.server.redirect.before.connack/client", + "${net}/session.server.redirect.before.connack/server"}) + public void shouldRedirectBeforeConnack() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/session.server.redirect.after.connack/client", + "${net}/session.server.redirect.after.connack/server"}) + public void shouldRedirectAfterConnack() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java index 36794d2dfe..c523fa435d 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java @@ -37,6 +37,7 @@ public class MqttConfiguration extends Configuration public static final BooleanPropertyDef NO_LOCAL; public static final IntPropertyDef SESSION_EXPIRY_GRACE_PERIOD; public 
static final PropertyDef CLIENT_ID; + public static final PropertyDef SERVER_REFERENCE; static { @@ -57,6 +58,7 @@ public class MqttConfiguration extends Configuration NO_LOCAL = config.property("no.local", true); SESSION_EXPIRY_GRACE_PERIOD = config.property("session.expiry.grace.period", 30); CLIENT_ID = config.property("client.id"); + SERVER_REFERENCE = config.property("server.reference"); MQTT_CONFIG = config; } @@ -140,4 +142,9 @@ public String clientId() { return CLIENT_ID.get(this); } + + public String serverReference() + { + return SERVER_REFERENCE.get(this); + } } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java index fca1bfba29..00fbf922a6 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java @@ -57,6 +57,7 @@ public final class MqttReasonCodes public static final byte TOPIC_NAME_INVALID = (byte) 0x90; public static final byte PAYLOAD_FORMAT_INVALID = (byte) 0x99; public static final byte QOS_NOT_SUPPORTED = (byte) 0x9b; + public static final byte SERVER_MOVED = (byte) 0x9d; public static final byte PACKET_IDENTIFIER_IN_USE = (byte) 0x91; public static final byte PACKET_IDENTIFIER_NOT_FOUND = (byte) 0x92; diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 11f76362a3..8039de7d7f 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ 
b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -26,6 +26,7 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.PROTOCOL_ERROR; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.QOS_NOT_SUPPORTED; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.RETAIN_NOT_SUPPORTED; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.SERVER_MOVED; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.SESSION_TAKEN_OVER; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.SHARED_SUBSCRIPTION_NOT_SUPPORTED; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.SUBSCRIPTION_IDS_NOT_SUPPORTED; @@ -147,6 +148,7 @@ import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttEndExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttFlushExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttPublishDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttResetExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.ResetFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.SignalFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.WindowFW; @@ -234,6 +236,7 @@ public final class MqttServerFactory implements MqttStreamFactory private final MqttPublishDataExFW mqttPublishDataExRO = new MqttPublishDataExFW(); private final MqttDataExFW mqttSubscribeDataExRO = new MqttDataExFW(); + private final MqttResetExFW mqttResetExRO = new MqttResetExFW(); private final MqttBeginExFW.Builder mqttPublishBeginExRW = new MqttBeginExFW.Builder(); private final MqttBeginExFW.Builder mqttSubscribeBeginExRW = new MqttBeginExFW.Builder(); @@ -310,6 +313,7 @@ public final class MqttServerFactory implements 
MqttStreamFactory private final Map decodersByPacketType; private final boolean session; + private final String serverReference; { final Map decodersByPacketType = new EnumMap<>(MqttPacketType.class); @@ -422,6 +426,7 @@ public MqttServerFactory( final Optional clientId = Optional.ofNullable(config.clientId()).map(String16FW::new); this.supplyClientId = clientId.isPresent() ? clientId::get : () -> new String16FW(UUID.randomUUID().toString()); + this.serverReference = config.serverReference(); } @Override @@ -1676,7 +1681,7 @@ else if (length > MAXIMUM_CLIENT_ID_LENGTH) } else { - doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false); + doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, null); connected = true; } @@ -1688,7 +1693,7 @@ else if (length > MAXIMUM_CLIENT_ID_LENGTH) if (reasonCode != SUCCESS) { doCancelConnectTimeout(); - doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false); + doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, null); doNetworkEnd(traceId, authorization); decoder = decodeIgnoreAll; progress = connect.limit(); @@ -1719,6 +1724,7 @@ private void resolveSession( { sessionBuilder.clientId(clientId); sessionBuilder.expiry(sessionExpiryInterval); + sessionBuilder.serverReference(serverReference); if (willFlagSet) { final int willFlags = decodeWillFlags(flags); @@ -2180,11 +2186,11 @@ private void onDecodeError( } if (connected) { - doEncodeDisconnect(traceId, authorization, reasonCode); + doEncodeDisconnect(traceId, authorization, reasonCode, null); } else { - doEncodeConnack(traceId, authorization, reasonCode, false, false); + doEncodeConnack(traceId, authorization, reasonCode, false, false, null); } doNetworkEnd(traceId, authorization); @@ -2446,7 +2452,8 @@ private void doEncodeConnack( long authorization, int reasonCode, boolean assignedClientId, - boolean sessionPresent) + boolean sessionPresent, + String16FW serverReference) { int 
propertiesSize = 0; @@ -2526,6 +2533,14 @@ private void doEncodeConnack( } } + if (serverReference != null) + { + mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity()) + .serverReference(serverReference) + .build(); + propertiesSize = mqttProperty.limit(); + } + int flags = sessionPresent ? CONNACK_SESSION_PRESENT : 0x00; final int propertiesSize0 = propertiesSize; @@ -2597,14 +2612,28 @@ private void doEncodePingResp( private void doEncodeDisconnect( long traceId, long authorization, - int reasonCode) + int reasonCode, + String16FW serverReference) { + int propertiesSize = 0; + + MqttPropertyFW mqttProperty; + if (serverReference != null) + { + mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity()) + .serverReference(serverReference) + .build(); + propertiesSize = mqttProperty.limit(); + } + + final int propertySize0 = propertiesSize; final MqttDisconnectFW disconnect = mqttDisconnectRW.wrap(writeBuffer, FIELD_OFFSET_PAYLOAD, writeBuffer.capacity()) .typeAndFlags(0xe0) - .remainingLength(2) + .remainingLength(2 + propertySize0) .reasonCode(reasonCode & 0xff) - .properties(p -> p.length(0).value(EMPTY_OCTETS)) + .properties(p -> p.length(propertySize0) + .value(propertyBuffer, 0, propertySize0)) .build(); doNetworkData(traceId, authorization, 0L, disconnect); @@ -3026,10 +3055,24 @@ private void onSessionReset( final long traceId = reset.traceId(); final long authorization = reset.authorization(); - if (!MqttState.initialOpened(state)) + final OctetsFW extension = reset.extension(); + final MqttResetExFW mqttResetEx = extension.get(mqttResetExRO::tryWrap); + + String16FW serverReference = mqttResetEx.serverReference(); + byte reasonCode = SUCCESS; + if (serverReference != null && serverReference.length() != 0) + { + reasonCode = SERVER_MOVED; + } + if (!connected) { doCancelConnectTimeout(); - doEncodeConnack(traceId, authorization, SUCCESS, assignedClientId, false); + + 
doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, serverReference); + } + else + { + doEncodeDisconnect(traceId, authorization, reasonCode, serverReference); } setInitialClosed(); @@ -3120,7 +3163,7 @@ private void onSessionData( sessionPresent = true; } } - doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, sessionPresent); + doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, sessionPresent, null); connected = true; } else diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java index 3c1b813f58..566864b6c7 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java @@ -23,6 +23,7 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.NO_LOCAL; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.RETAIN_AVAILABLE; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SERVER_REFERENCE; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SESSIONS_AVAILABLE; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SESSION_EXPIRY_GRACE_PERIOD; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SESSION_EXPIRY_INTERVAL; @@ -51,6 +52,7 @@ public class MqttConfigurationTest public static final String NO_LOCAL_NAME = "zilla.binding.mqtt.no.local"; public static final String SESSION_EXPIRY_GRACE_PERIOD_NAME = "zilla.binding.mqtt.session.expiry.grace.period"; public static final String 
CLIENT_ID_NAME = "zilla.binding.mqtt.client.id"; + public static final String SERVER_REFERENCE_NAME = "zilla.binding.mqtt.server.reference"; @Test public void shouldVerifyConstants() throws Exception @@ -70,5 +72,6 @@ public void shouldVerifyConstants() throws Exception assertEquals(NO_LOCAL.name(), NO_LOCAL_NAME); assertEquals(SESSION_EXPIRY_GRACE_PERIOD.name(), SESSION_EXPIRY_GRACE_PERIOD_NAME); assertEquals(CLIENT_ID.name(), CLIENT_ID_NAME); + assertEquals(SERVER_REFERENCE.name(), SERVER_REFERENCE_NAME); } } diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java index 4419c5cbac..35d13dfad2 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java @@ -18,6 +18,7 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.KEEP_ALIVE_MINIMUM_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.MAXIMUM_QOS_NAME; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SERVER_REFERENCE_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_EXPIRY_INTERVAL_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.WILDCARD_SUBSCRIPTION_AVAILABLE_NAME; @@ -241,4 +242,34 @@ public void shouldClientTakeOverSession() throws Exception { k3po.finish(); } + + @Test + @Configuration("server.yaml") + @Specification({ + 
"${net}/session.server.redirect.after.connack/client", + "${app}/session.server.redirect.after.connack/server"}) + @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") + @Configure(name = SERVER_REFERENCE_NAME, value = "localhost:1883") + public void shouldRedirectAfterConnack() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/session.server.redirect.before.connack/client", + "${app}/session.server.redirect.before.connack/server"}) + @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") + @Configure(name = SERVER_REFERENCE_NAME, value = "localhost:1883") + public void shouldRedirectBeforeConnack() throws Exception + { + k3po.finish(); + } } From 4c294a694886eac6455fb46972df129bf2eeee5a Mon Sep 17 00:00:00 2001 From: bmaidics Date: Wed, 26 Jul 2023 01:34:22 +0200 Subject: [PATCH 003/115] Mqtt kafka options (#304) --- .../mqtt/kafka/config/proxy.options.yaml | 27 ++++ .../kafka/schema/mqtt.kafka.schema.patch.json | 35 ++++- .../client.rpt | 49 +++++++ .../server.rpt | 52 +++++++ .../client.rpt | 52 +++++++ .../server.rpt | 56 ++++++++ .../binding/mqtt/kafka/config/SchemaTest.java | 8 ++ .../binding/mqtt/kafka/streams/KafkaIT.java | 18 +++ .../config/MqttKafkaBindingConfig.java | 21 +++ .../config/MqttKafkaOptionsConfig.java | 28 ++++ .../config/MqttKafkaOptionsConfigAdapter.java | 132 ++++++++++++++++++ .../config/MqttKafkaTopicsConfig.java | 36 +++++ .../stream/MqttKafkaPublishFactory.java | 38 ++--- .../stream/MqttKafkaSubscribeFactory.java | 32 +++-- .../src/main/moditect/module-info.java | 3 + 
...time.engine.config.OptionsConfigAdapterSpi | 1 + .../MqttKafkaOptionsConfigAdapterTest.java | 101 ++++++++++++++ .../stream/MqttKafkaPublishProxyIT.java | 10 ++ .../stream/MqttKafkaSubscribeProxyIT.java | 10 ++ 19 files changed, 676 insertions(+), 33 deletions(-) create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.options.yaml create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt create mode 100644 incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java create mode 100644 incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java create mode 100644 incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaTopicsConfig.java create mode 100644 incubator/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi create mode 100644 incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.options.yaml b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.options.yaml new file mode 100644 index 0000000000..58c7456463 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.options.yaml @@ -0,0 +1,27 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +--- +name: test +bindings: + mqtt0: + type: mqtt-kafka + kind: proxy + options: + topics: + sessions: sessions + messages: messages + retained: retained + exit: kafka0 diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json index e596826c38..9d4fc0549d 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json @@ -32,7 +32,40 @@ "enum": [ "proxy" ] }, "vault": false, - "options": false, + "options": + { + "properties": + { + "topics": + { + "title": "Topics", + "type": "object", + "properties": + { + "sessions": + { + "title": "Kafka Sessions Topic", + "type": "string", + "default": "mqtt_sessions" + }, + "messages": + { + "title": "Kafka Messages Topic", + "type": "string", + "default": "mqtt_messages" + }, + "retained": + { + "title": "Kafka Retained Topic", + "type": "string", + "default": "mqtt_retained" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, "routes": false }, "anyOf": diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt new file mode 100644 index 0000000000..7ce14030c9 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt @@ -0,0 +1,49 @@ +# +# Copyright 2021-2023 Aklivity Inc +# 
+# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .headerInt("zilla:timeout-ms", 15000) + .header("zilla:content-type", "message") + .header("zilla:format", "TEXT") + .header("zilla:reply-to", "sensor/one") + .header("zilla:correlation-id", "info") + .build() + .build()} + +write "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt new file mode 100644 index 0000000000..3d12d7a752 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt @@ -0,0 +1,52 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity 
Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .headerInt("zilla:timeout-ms", 15000) + .header("zilla:content-type", "message") + .header("zilla:format", "TEXT") + .header("zilla:reply-to", "sensor/one") + .header("zilla:correlation-id", "info") + .build() + .build()} + +read "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt new file mode 100644 index 0000000000..03e7f9dbbf --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt @@ -0,0 +1,52 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community 
License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt new file mode 100644 index 0000000000..5d1ee60fce --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt @@ -0,0 +1,56 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with 
the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/config/SchemaTest.java b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/config/SchemaTest.java index 70e6503724..f70d2087fb 100644 --- a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/config/SchemaTest.java +++ b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/config/SchemaTest.java @@ -40,4 +40,12 @@ public void shouldValidateProxy() assertThat(config, not(nullValue())); } + + @Test + public void shouldValidateProxyWithOptions() + { + JsonObject config = schema.validate("proxy.options.yaml"); + + assertThat(config, 
not(nullValue())); + } } diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java index 0d54dbca57..8790938082 100644 --- a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java +++ b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java @@ -143,6 +143,15 @@ public void shouldSendOneMessage() throws Exception k3po.finish(); } + @Test + @Specification({ + "${kafka}/publish.one.message.changed.topic.name/client", + "${kafka}/publish.one.message.changed.topic.name/server"}) + public void shouldSendOneMessageWithChangedTopicName() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${kafka}/publish.retained/client", @@ -305,6 +314,15 @@ public void shouldReceiveOneMessage() throws Exception k3po.finish(); } + @Test + @Specification({ + "${kafka}/subscribe.one.message.changed.topic.name/client", + "${kafka}/subscribe.one.message.changed.topic.name/server"}) + public void shouldReceiveOneMessageWithChangedTopicName() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${kafka}/subscribe.one.message.receive.response.topic.and.correlation.data/client", diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java index 044e5918a5..0e5ad114f9 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java @@ -17,7 +17,9 @@ 
import static java.util.stream.Collectors.toList; import java.util.List; +import java.util.Optional; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; @@ -26,6 +28,7 @@ public class MqttKafkaBindingConfig public final long id; public final String entry; public final KindConfig kind; + public final MqttKafkaOptionsConfig options; public final List routes; public MqttKafkaBindingConfig( @@ -34,6 +37,9 @@ public MqttKafkaBindingConfig( this.id = binding.id; this.entry = binding.entry; this.kind = binding.kind; + this.options = Optional.ofNullable(binding.options) + .map(MqttKafkaOptionsConfig.class::cast) + .orElse(MqttKafkaOptionsConfigAdapter.DEFAULT); this.routes = binding.routes.stream().map(MqttKafkaRouteConfig::new).collect(toList()); } @@ -45,4 +51,19 @@ public MqttKafkaRouteConfig resolve( .findFirst() .orElse(null); } + + public String16FW messagesTopic() + { + return options.topics.messages; + } + + public String16FW sessionsTopic() + { + return options.topics.sessions; + } + + public String16FW retainedTopic() + { + return options.topics.retained; + } } diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java new file mode 100644 index 0000000000..b2d8c55c28 --- /dev/null +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java @@ -0,0 +1,28 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config; + +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; + +public class MqttKafkaOptionsConfig extends OptionsConfig +{ + public final MqttKafkaTopicsConfig topics; + + public MqttKafkaOptionsConfig( + MqttKafkaTopicsConfig topics) + { + this.topics = topics; + } +} diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java new file mode 100644 index 0000000000..a186c84c26 --- /dev/null +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java @@ -0,0 +1,132 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config; + +import jakarta.json.Json; +import jakarta.json.JsonObject; +import jakarta.json.JsonObjectBuilder; +import jakarta.json.bind.adapter.JsonbAdapter; + +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaBinding; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; +import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; + +public class MqttKafkaOptionsConfigAdapter implements OptionsConfigAdapterSpi, JsonbAdapter +{ + private static final String TOPICS_NAME = "topics"; + private static final String SESSIONS_NAME = "sessions"; + private static final String MESSAGES_NAME = "messages"; + private static final String RETAINED_NAME = "retained"; + + private static final String16FW SESSIONS_DEFAULT = new String16FW("mqtt_sessions"); + private static final String16FW MESSAGES_DEFAULT = new String16FW("mqtt_messages"); + private static final String16FW RETAINED_DEFAULT = new String16FW("mqtt_retained"); + private static final MqttKafkaTopicsConfig TOPICS_DEFAULT = + new MqttKafkaTopicsConfig(SESSIONS_DEFAULT, MESSAGES_DEFAULT, RETAINED_DEFAULT); + + public static final MqttKafkaOptionsConfig DEFAULT = + new MqttKafkaOptionsConfig(TOPICS_DEFAULT); + + @Override + public Kind kind() + { + return Kind.BINDING; + } + + @Override + public String type() + { + return MqttKafkaBinding.NAME; + } + + @Override + public JsonObject adaptToJson( + OptionsConfig options) + { + MqttKafkaOptionsConfig mqttKafkaOptions = (MqttKafkaOptionsConfig) options; + + JsonObjectBuilder object = Json.createObjectBuilder(); + + MqttKafkaTopicsConfig topics = mqttKafkaOptions.topics; + + if (topics != null && + !TOPICS_DEFAULT.equals(topics)) + { + JsonObjectBuilder newTopics = Json.createObjectBuilder(); + String16FW sessions = topics.sessions; + if (sessions != null && + 
!(SESSIONS_DEFAULT.equals(sessions))) + { + newTopics.add(SESSIONS_NAME, sessions.asString()); + } + + String16FW messages = topics.messages; + if (messages != null && + !MESSAGES_DEFAULT.equals(messages)) + { + newTopics.add(MESSAGES_NAME, messages.asString()); + } + + String16FW retained = topics.retained; + if (retained != null && + !RETAINED_DEFAULT.equals(retained)) + { + newTopics.add(RETAINED_NAME, retained.asString()); + } + + object.add(TOPICS_NAME, newTopics); + } + + return object.build(); + } + + @Override + public OptionsConfig adaptFromJson( + JsonObject object) + { + MqttKafkaTopicsConfig newTopics = TOPICS_DEFAULT; + + if (object.containsKey(TOPICS_NAME)) + { + JsonObject topics = object.getJsonObject(TOPICS_NAME); + String16FW newSessions = SESSIONS_DEFAULT; + + if (topics.containsKey(SESSIONS_NAME)) + { + newSessions = new String16FW(topics.getString(SESSIONS_NAME)); + } + + String16FW newMessages = MESSAGES_DEFAULT; + + if (topics.containsKey(MESSAGES_NAME)) + { + newMessages = new String16FW(topics.getString(MESSAGES_NAME)); + } + + String16FW newRetained = RETAINED_DEFAULT; + + if (topics.containsKey(RETAINED_NAME)) + { + newRetained = new String16FW(topics.getString(RETAINED_NAME)); + } + + newTopics = new MqttKafkaTopicsConfig(newSessions, newMessages, newRetained); + } + + return new MqttKafkaOptionsConfig(newTopics); + } +} diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaTopicsConfig.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaTopicsConfig.java new file mode 100644 index 0000000000..9a5c5066e1 --- /dev/null +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaTopicsConfig.java @@ -0,0 +1,36 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * 
this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config; + + +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; + +public class MqttKafkaTopicsConfig +{ + public final String16FW sessions; + public final String16FW messages; + public final String16FW retained; + + public MqttKafkaTopicsConfig( + String16FW sessions, + String16FW messages, + String16FW retained) + { + this.sessions = sessions; + this.messages = messages; + this.retained = retained; + } +} + diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java index 6027dd0c31..4bbe31ca92 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java @@ -108,9 +108,6 @@ public class MqttKafkaPublishFactory implements BindingHandler private final LongFunction supplyBinding; private final String16FW binaryFormat; private final String16FW textFormat; - private final String16FW kafkaTopic; - private final String16FW kafkaRetainedTopic; - private final int bufferCapacity; public MqttKafkaPublishFactory( MqttKafkaConfiguration config, @@ -119,10 +116,9 @@ public 
MqttKafkaPublishFactory( { this.mqttTypeId = context.supplyTypeId(MQTT_TYPE_NAME); this.kafkaTypeId = context.supplyTypeId(KAFKA_TYPE_NAME); - this.bufferCapacity = context.writeBuffer().capacity(); - this.writeBuffer = new UnsafeBuffer(new byte[bufferCapacity]); - this.extBuffer = new UnsafeBuffer(new byte[bufferCapacity]); - this.kafkaHeadersBuffer = new UnsafeBuffer(new byte[bufferCapacity]); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.kafkaHeadersBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.helper = new MqttKafkaHeaderHelper(); this.streamFactory = context.streamFactory(); this.supplyInitialId = context::supplyInitialId; @@ -130,8 +126,6 @@ public MqttKafkaPublishFactory( this.supplyBinding = supplyBinding; this.binaryFormat = new String16FW(MqttPayloadFormat.BINARY.name()); this.textFormat = new String16FW(MqttPayloadFormat.TEXT.name()); - this.kafkaTopic = new String16FW(config.messagesTopic()); - this.kafkaRetainedTopic = new String16FW(config.retainedMessagesTopic()); } @Override @@ -151,14 +145,13 @@ public MessageConsumer newStream( final MqttKafkaBindingConfig binding = supplyBinding.apply(routedId); final MqttKafkaRouteConfig resolved = binding != null ? 
binding.resolve(authorization) : null; - - MessageConsumer newStream = null; if (resolved != null) { final long resolvedId = resolved.id; - newStream = new MqttPublishProxy(mqtt, originId, routedId, initialId, resolvedId)::onMqttMessage; + newStream = new MqttPublishProxy(mqtt, originId, routedId, initialId, resolvedId, + binding.messagesTopic(), binding.retainedTopic())::onMqttMessage; } return newStream; @@ -173,6 +166,8 @@ private final class MqttPublishProxy private final long replyId; private final KafkaMessagesProxy messages; private final KafkaRetainedProxy retained; + private final String16FW kafkaMessagesTopic; + private final String16FW kafkaRetainedTopic; private int state; @@ -191,13 +186,14 @@ private final class MqttPublishProxy private OctetsFW clientIdOctets; private boolean retainAvailable; - private MqttPublishProxy( MessageConsumer mqtt, long originId, long routedId, long initialId, - long resolvedId) + long resolvedId, + String16FW kafkaMessagesTopic, + String16FW kafkaRetainedTopic) { this.mqtt = mqtt; this.originId = originId; @@ -206,6 +202,8 @@ private MqttPublishProxy( this.replyId = supplyReplyId.applyAsLong(initialId); this.messages = new KafkaMessagesProxy(originId, resolvedId, this); this.retained = new KafkaRetainedProxy(originId, resolvedId, this); + this.kafkaMessagesTopic = kafkaMessagesTopic; + this.kafkaRetainedTopic = kafkaRetainedTopic; } private void onMqttMessage( @@ -288,11 +286,11 @@ private void onMqttBegin( .value(topicNameBuffer, 0, topicNameBuffer.capacity()) .build(); - messages.doKafkaBegin(traceId, authorization, affinity); + messages.doKafkaBegin(traceId, authorization, affinity, kafkaMessagesTopic); this.retainAvailable = (mqttPublishBeginEx.flags() & 1 << MqttPublishFlags.RETAIN.value()) != 0; if (retainAvailable) { - retained.doKafkaBegin(traceId, authorization, affinity); + retained.doKafkaBegin(traceId, authorization, affinity, kafkaRetainedTopic); } } @@ -693,7 +691,8 @@ private KafkaMessagesProxy( private void 
doKafkaBegin( long traceId, long authorization, - long affinity) + long affinity, + String16FW kafkaMessagesTopic) { initialSeq = delegate.initialSeq; initialAck = delegate.initialAck; @@ -701,7 +700,7 @@ private void doKafkaBegin( state = MqttKafkaState.openingInitial(state); kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, kafkaTopic); + traceId, authorization, affinity, kafkaMessagesTopic); } private void doKafkaData( @@ -997,7 +996,8 @@ private KafkaRetainedProxy( private void doKafkaBegin( long traceId, long authorization, - long affinity) + long affinity, + String16FW kafkaRetainedTopic) { initialSeq = delegate.initialSeq; initialAck = delegate.initialAck; diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java index bca0b7335b..5c0d00e5da 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java @@ -133,8 +133,6 @@ public class MqttKafkaSubscribeFactory implements BindingHandler private final int kafkaTypeId; private final LongFunction supplyBinding; private final MqttKafkaHeaderHelper helper; - private final String16FW kafkaMessagesTopicName; - private final String16FW kafkaRetainedTopicName; public MqttKafkaSubscribeFactory( MqttKafkaConfiguration config, @@ -153,8 +151,6 @@ public MqttKafkaSubscribeFactory( this.supplyReplyId = context::supplyReplyId; this.supplyBinding = supplyBinding; this.helper = new MqttKafkaHeaderHelper(); - this.kafkaMessagesTopicName = new String16FW(config.messagesTopic()); - 
this.kafkaRetainedTopicName = new String16FW(config.retainedMessagesTopic()); } @Override @@ -173,15 +169,18 @@ public MessageConsumer newStream( final MqttKafkaBindingConfig binding = supplyBinding.apply(routedId); - final MqttKafkaRouteConfig resolved = binding != null ? binding.resolve(authorization) : null; - + final MqttKafkaRouteConfig resolved = binding != null ? + binding.resolve(authorization) : null; MessageConsumer newStream = null; if (resolved != null) { final long resolvedId = resolved.id; - newStream = new MqttSubscribeProxy(mqtt, originId, routedId, initialId, resolvedId)::onMqttMessage; + final String16FW kafkaMessagesTopic = binding.messagesTopic(); + final String16FW kafkaRetainedTopic = binding.retainedTopic(); + newStream = new MqttSubscribeProxy(mqtt, originId, routedId, initialId, resolvedId, + kafkaMessagesTopic, kafkaRetainedTopic)::onMqttMessage; } return newStream; @@ -222,7 +221,9 @@ private MqttSubscribeProxy( long originId, long routedId, long initialId, - long resolvedId) + long resolvedId, + String16FW kafkaMessagesTopic, + String16FW kafkaRetainedTopic) { this.mqtt = mqtt; this.originId = originId; @@ -233,8 +234,8 @@ private MqttSubscribeProxy( this.retainedSubscriptionIds = new IntArrayList(); this.retainedSubscriptions = new ArrayList<>(); this.retainAsPublished = new Long2ObjectHashMap<>(); - this.messages = new KafkaMessagesProxy(originId, resolvedId, this); - this.retained = new KafkaRetainedProxy(originId, resolvedId, this); + this.messages = new KafkaMessagesProxy(originId, resolvedId, kafkaMessagesTopic, this); + this.retained = new KafkaRetainedProxy(originId, resolvedId, kafkaRetainedTopic, this); } private void onMqttMessage( @@ -685,6 +686,7 @@ private int replyWindow() final class KafkaMessagesProxy { + private final String16FW topic; private MessageConsumer kafka; private final long originId; private final long routedId; @@ -711,10 +713,12 @@ final class KafkaMessagesProxy private KafkaMessagesProxy( long originId, 
long routedId, + String16FW topic, MqttSubscribeProxy mqtt) { this.originId = originId; this.routedId = routedId; + this.topic = topic; this.mqtt = mqtt; this.initialId = supplyInitialId.applyAsLong(routedId); this.replyId = supplyReplyId.applyAsLong(initialId); @@ -734,7 +738,7 @@ private void doKafkaBegin( state = MqttKafkaState.openingInitial(state); kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, mqtt.clientId, kafkaMessagesTopicName, filters, KafkaOffsetType.LIVE); + traceId, authorization, affinity, mqtt.clientId, topic, filters, KafkaOffsetType.LIVE); } } @@ -1164,6 +1168,7 @@ private void doKafkaWindow( final class KafkaRetainedProxy { + private final String16FW topic; private MessageConsumer kafka; private final long originId; private final long routedId; @@ -1185,10 +1190,12 @@ final class KafkaRetainedProxy private KafkaRetainedProxy( long originId, long routedId, + String16FW topic, MqttSubscribeProxy mqtt) { this.originId = originId; this.routedId = routedId; + this.topic = topic; this.mqtt = mqtt; this.initialId = supplyInitialId.applyAsLong(routedId); this.replyId = supplyReplyId.applyAsLong(initialId); @@ -1231,8 +1238,7 @@ private void doKafkaBegin( kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, mqtt.clientId, kafkaRetainedTopicName, - retainedFilters, KafkaOffsetType.HISTORICAL); + traceId, authorization, affinity, mqtt.clientId, topic, retainedFilters, KafkaOffsetType.HISTORICAL); } private void doKafkaFlush( diff --git a/incubator/binding-mqtt-kafka/src/main/moditect/module-info.java b/incubator/binding-mqtt-kafka/src/main/moditect/module-info.java index 2fe3bda1fa..e8aacb12db 100644 --- a/incubator/binding-mqtt-kafka/src/main/moditect/module-info.java +++ b/incubator/binding-mqtt-kafka/src/main/moditect/module-info.java @@ -22,4 +22,7 @@ provides 
io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi with io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaConditionConfigAdapter; + provides io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi + with io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaOptionsConfigAdapter; + } diff --git a/incubator/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi b/incubator/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi new file mode 100644 index 0000000000..50a9b1d8b4 --- /dev/null +++ b/incubator/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi @@ -0,0 +1 @@ +io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaOptionsConfigAdapter diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java new file mode 100644 index 0000000000..48445b96ae --- /dev/null +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java @@ -0,0 +1,101 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +import jakarta.json.bind.Jsonb; +import jakarta.json.bind.JsonbBuilder; +import jakarta.json.bind.JsonbConfig; + +import org.junit.Before; +import org.junit.Test; + +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; + +public class MqttKafkaOptionsConfigAdapterTest +{ + private Jsonb jsonb; + + @Before + public void initJson() + { + JsonbConfig config = new JsonbConfig() + .withAdapters(new MqttKafkaOptionsConfigAdapter()); + jsonb = JsonbBuilder.create(config); + } + + @Test + public void shouldReadOptionsWithDefaults() + { + String text = "{ }"; + + MqttKafkaOptionsConfig options = jsonb.fromJson(text, MqttKafkaOptionsConfig.class); + + assertThat(options, not(nullValue())); + assertThat(options.topics, not(nullValue())); + assertThat(options.topics.sessions.asString(), equalTo("mqtt_sessions")); + assertThat(options.topics.messages.asString(), equalTo("mqtt_messages")); + assertThat(options.topics.retained.asString(), equalTo("mqtt_retained")); + } + + @Test + public void shouldReadOptions() + { + String text = + "{" + + "\"topics\":" + + "{" + + "\"sessions\":\"sessions\"," + + "\"messages\":\"messages\"," + + "\"retained\":\"retained\"," + + "}" + + "}"; + + MqttKafkaOptionsConfig options = jsonb.fromJson(text, MqttKafkaOptionsConfig.class); + + assertThat(options, not(nullValue())); + assertThat(options.topics, not(nullValue())); + assertThat(options.topics.sessions.asString(), equalTo("sessions")); + assertThat(options.topics.messages.asString(), equalTo("messages")); + assertThat(options.topics.retained.asString(), equalTo("retained")); + } + + @Test + public 
void shouldWriteOptions() + { + MqttKafkaOptionsConfig options = new MqttKafkaOptionsConfig( + new MqttKafkaTopicsConfig( + new String16FW("sessions"), + new String16FW("messages"), + new String16FW("retained"))); + + String text = jsonb.toJson(options); + + assertThat(text, not(nullValue())); + assertThat(text, equalTo( + "{" + + "\"topics\":" + + "{" + + "\"sessions\":\"sessions\"," + + "\"messages\":\"messages\"," + + "\"retained\":\"retained\"" + + "}" + + "}")); + } +} diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java index da151e3284..1ec9e315b8 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java @@ -160,6 +160,16 @@ public void shouldSendOneMessage() throws Exception k3po.finish(); } + @Test + @Configuration("proxy.options.yaml") + @Specification({ + "${mqtt}/publish.one.message/client", + "${kafka}/publish.one.message.changed.topic.name/server"}) + public void shouldSendOneMessageWithChangedTopicName() throws Exception + { + k3po.finish(); + } + @Test @Configuration("proxy.yaml") @Specification({ diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java index 9023847b73..9e5cb14695 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java +++ 
b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java @@ -142,6 +142,16 @@ public void shouldReceiveOneMessage() throws Exception k3po.finish(); } + @Test + @Configuration("proxy.options.yaml") + @Specification({ + "${mqtt}/subscribe.one.message/client", + "${kafka}/subscribe.one.message.changed.topic.name/server"}) + public void shouldReceiveOneMessageWithChangedTopicName() throws Exception + { + k3po.finish(); + } + @Test @Configuration("proxy.yaml") @Specification({ From 16a4513987f7a7c3ce9cff851564f61b91d1ba2e Mon Sep 17 00:00:00 2001 From: bmaidics Date: Wed, 26 Jul 2023 20:35:35 +0200 Subject: [PATCH 004/115] MQTT guard implementation (#307) --- .../config/server.credentials.password.yaml | 39 ++ .../config/server.credentials.username.yaml | 39 ++ .../mqtt/schema/mqtt.schema.patch.json | 81 +++- .../client.rpt | 44 ++ .../server.rpt | 46 ++ .../client.rpt | 33 ++ .../server.rpt | 35 ++ .../client.rpt | 36 ++ .../server.rpt | 37 ++ .../client.rpt | 33 ++ .../server.rpt | 34 ++ .../client.rpt | 46 ++ .../server.rpt | 37 ++ .../specs/binding/mqtt/config/SchemaTest.java | 9 + .../mqtt/streams/network/ConnectionIT.java | 36 ++ .../config/MqttAuthorizationConfig.java | 81 ++++ .../internal/config/MqttBindingConfig.java | 75 ++++ .../internal/config/MqttOptionsConfig.java | 29 ++ .../config/MqttOptionsConfigAdapter.java | 150 +++++++ .../internal/stream/MqttServerFactory.java | 394 +++++++++--------- .../src/main/moditect/module-info.java | 3 + ...time.engine.config.OptionsConfigAdapterSpi | 1 + .../config/MqttOptionsConfigAdapterTest.java | 115 +++++ .../mqtt/internal/stream/ConnectionIT.java | 57 +++ 24 files changed, 1302 insertions(+), 188 deletions(-) create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.password.yaml create mode 100644 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.username.yaml create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt create mode 100644 incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttAuthorizationConfig.java create mode 100644 
incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfig.java create mode 100644 incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java create mode 100644 incubator/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi create mode 100644 incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.password.yaml b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.password.yaml new file mode 100644 index 0000000000..cd582c7254 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.password.yaml @@ -0,0 +1,39 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +--- +name: test +guards: + test0: + type: test + options: + credentials: TOKEN + lifetime: PT5S + challenge: PT5S +bindings: + net0: + type: mqtt + kind: server + options: + authorization: + test0: + credentials: + connect: + password: Bearer {credentials} + routes: + - exit: app0 + guarded: + test0: [] diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.username.yaml b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.username.yaml new file mode 100644 index 0000000000..dcc3bf8b4b --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.username.yaml @@ -0,0 +1,39 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +--- +name: test +guards: + test0: + type: test + options: + credentials: TOKEN + lifetime: PT5S + challenge: PT5S +bindings: + net0: + type: mqtt + kind: server + options: + authorization: + test0: + credentials: + connect: + username: Bearer {credentials} + routes: + - exit: app0 + guarded: + test0: [] diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json index 21575a7bcd..f574e92425 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json @@ -32,7 +32,86 @@ "enum": [ "server" ] }, "vault": false, - "options": false, + "options": + { + "properties": + { + "authorization": + { + "title": "Authorizations", + "type": "object", + "patternProperties": + { + "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": + { + "title": "Guard", + "type": "object", + "properties": + { + "credentials": + { + "title": "Credentials", + "type": "object", + "properties": + { + "connect": + { + "title": "Connect", + "type": "object", + "properties": + { + "username": + { + "title": "Username", + "type": "string" + }, + "password": + { + "title": "Password", + "type": "string" + } + }, + "oneOf": + [ + { + "required": + [ + "username" + ] + }, + { + "required": + [ + "password" + ] + } + ] + } + }, + "additionalProperties": false, + "anyOf": + [ + { + "required": + [ + "connect" + ] + } + ] + } + }, + "additionalProperties": false, + "required": + [ + "credentials" + ] + } + }, + "maxProperties": 1 + } + }, + "additionalProperties": false + }, "routes": { "items": diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/client.rpt new file mode 100644 index 0000000000..fde90fdb5e --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/client.rpt @@ -0,0 +1,44 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:authorization 1L + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .qos("AT_MOST_ONCE") + .expiryInterval(15) + .contentType("message") + .format("TEXT") + .responseTopic("sensor/one") + .correlation("info") + .build() + .build()} + +write "message" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/server.rpt new file mode 100644 index 0000000000..2555639ec8 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/server.rpt @@ -0,0 +1,46 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:authorization 1L + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .publish() + .qos("AT_MOST_ONCE") + .expiryInterval(15) + .contentType("message") + .format("TEXT") + .responseTopic("sensor/one") + .correlation("info") + .build() + .build()} + +read "message" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/client.rpt new file mode 100644 index 0000000000..47b6b7eeba --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/client.rpt @@ -0,0 +1,33 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x25] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x42] # flags = password, clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + [0x00 0x10] "Bearer INCORRECT" # password + +read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/server.rpt new file mode 100644 index 0000000000..1c62c0f0bf --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/server.rpt @@ -0,0 +1,35 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x25] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x42] # flags = password, clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + [0x00 0x10] "Bearer INCORRECT" # password + + +write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt new file mode 100644 index 0000000000..f5732d9ba7 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt @@ -0,0 +1,36 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x21] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x42] # flags = password, clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + [0x00 0x0c] "Bearer TOKEN" # password + +read [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties = none diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt new file mode 100644 index 0000000000..c16e710bce --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt @@ -0,0 +1,37 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x21] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x42] # flags = password, clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + [0x00 0x0c] "Bearer TOKEN" # password + +write [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties = none diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/client.rpt new file mode 100644 index 0000000000..b95a982110 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/client.rpt @@ -0,0 +1,33 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x25] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x82] # flags = username, clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + [0x00 0x10] "Bearer INCORRECT" # username + +read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/server.rpt new file mode 100644 index 0000000000..e9113e3ce0 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/server.rpt @@ -0,0 +1,34 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x25] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x82] # flags = username, clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + [0x00 0x10] "Bearer INCORRECT" # username + +write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt new file mode 100644 index 0000000000..6ae62af4fa --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt @@ -0,0 +1,46 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x21] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x82] # flags = username, clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + [0x00 0x0c] "Bearer TOKEN" # password + +read [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties = none + +write [0x30 0x39] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x25] # properties + [0x02] 0x0f # expiry = 15 seconds + [0x03 0x00 0x07] "message" # content type + [0x01 0x01] # format = utf-8 + [0x08 0x00 0x0a] "sensor/one" # response topic + [0x09 0x00 0x04] "info" # correlation data + "message" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt new file mode 100644 index 0000000000..97bd5176c3 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt @@ -0,0 +1,37 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x21] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x82] # flags = username, clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + [0x00 0x0c] "Bearer TOKEN" # username + +write [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties = none diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java index 6541665d98..2021142ca5 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java @@ -32,6 +32,7 @@ public class SchemaTest @Rule public final ConfigSchemaRule schema = new ConfigSchemaRule() .schemaPatch("io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json") + .schemaPatch("io/aklivity/zilla/specs/engine/schema/guard/test.schema.patch.json") .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config"); @Ignore("TODO") @@ -87,6 +88,14 @@ public void shouldValidateServer() assertThat(config, not(nullValue())); } + @Test + public void shouldValidateServerWithAuthorizationOptions() + { + JsonObject config = schema.validate("server.credentials.username.yaml"); + + assertThat(config, not(nullValue())); + } + @Test public void shouldValidateServerWhenTopic() { diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java 
b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java index ab367fe9a8..71c2b374a5 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java @@ -46,6 +46,42 @@ public void shouldConnect() throws Exception k3po.finish(); } + @Test + @Specification({ + "${net}/connect.username.authentication.successful/client", + "${net}/connect.username.authentication.successful/server"}) + public void shouldAuthenticateUsernameAndConnect() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/connect.username.authentication.failed/client", + "${net}/connect.username.authentication.failed/server"}) + public void shouldFailUsernameAuthentication() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/connect.password.authentication.successful/client", + "${net}/connect.password.authentication.successful/server"}) + public void shouldAuthenticatePasswordAndConnect() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/connect.password.authentication.failed/client", + "${net}/connect.password.authentication.failed/server"}) + public void shouldFailPasswordAuthentication() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${net}/connect.server.assigned.client.id/client", diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttAuthorizationConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttAuthorizationConfig.java new file mode 100644 index 0000000000..e4f19cc6a0 --- /dev/null +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttAuthorizationConfig.java @@ -0,0 +1,81 @@ +/* + * Copyright 
2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.aklivity.zilla.runtime.binding.mqtt.internal.config; + +import java.util.List; + +public final class MqttAuthorizationConfig +{ + public final String name; + public final MqttCredentialsConfig credentials; + + public MqttAuthorizationConfig( + String name, + MqttCredentialsConfig credentials) + { + this.name = name; + this.credentials = credentials; + } + + public static final class MqttCredentialsConfig + { + public final List connect; + + public MqttCredentialsConfig( + List connect) + { + this.connect = connect; + } + } + + public enum MqttConnectProperty + { + USERNAME, + PASSWORD; + + public static MqttConnectProperty ofName( + String value) + { + MqttConnectProperty field = null; + switch (value) + { + case "username": + field = USERNAME; + break; + case "password": + field = PASSWORD; + break; + } + return field; + } + } + + public static final class MqttPatternConfig + { + public final MqttConnectProperty property; + public final String pattern; + + public MqttPatternConfig( + MqttConnectProperty property, + String pattern) + { + this.property = property; + this.pattern = pattern; + } + } +} + diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java 
index bc98037e48..0d5ef78b8d 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java @@ -18,17 +18,29 @@ import static java.util.stream.Collectors.toList; import java.util.List; +import java.util.function.Function; +import java.util.function.ToLongFunction; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttAuthorizationConfig.MqttConnectProperty; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttAuthorizationConfig.MqttCredentialsConfig; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttAuthorizationConfig.MqttPatternConfig; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; public final class MqttBindingConfig { + private static final Function DEFAULT_CREDENTIALS = x -> null; + public final long id; public final String name; public final KindConfig kind; + public final MqttOptionsConfig options; public final List routes; + public final Function credentials; + public final ToLongFunction resolveId; public MqttBindingConfig( BindingConfig binding) @@ -37,6 +49,19 @@ public MqttBindingConfig( this.name = binding.name; this.kind = binding.kind; this.routes = binding.routes.stream().map(MqttRouteConfig::new).collect(toList()); + this.options = (MqttOptionsConfig) binding.options; + this.resolveId = binding.resolveId; + this.credentials = options != null && options.authorization != null ? 
+ asAccessor(options.authorization.credentials) : DEFAULT_CREDENTIALS; + } + + public MqttRouteConfig resolve( + long authorization) + { + return routes.stream() + .filter(r -> r.authorized(authorization)) + .findFirst() + .orElse(null); } public MqttRouteConfig resolve( @@ -59,4 +84,54 @@ public MqttRouteConfig resolve( .findFirst() .orElse(null); } + + public Function credentials() + { + return credentials; + } + + public MqttConnectProperty authField() + { + return options != null && options.authorization != null ? + options.authorization.credentials.connect.get(0).property : null; + } + + private Function asAccessor( + MqttCredentialsConfig credentials) + { + Function accessor = DEFAULT_CREDENTIALS; + List connectPatterns = credentials.connect; + + if (connectPatterns != null && !connectPatterns.isEmpty()) + { + MqttPatternConfig config = connectPatterns.get(0); + + Matcher connectMatch = + Pattern.compile(config.pattern.replace("{credentials}", "(?[^\\s]+)")) + .matcher(""); + + accessor = orElseIfNull(accessor, connect -> + { + String result = null; + if (connect != null && connectMatch.reset(connect).matches()) + { + result = connectMatch.group("credentials"); + } + return result; + }); + } + + return accessor; + } + + private static Function orElseIfNull( + Function first, + Function second) + { + return x -> + { + String result = first.apply(x); + return result != null ? result : second.apply(x); + }; + } } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfig.java new file mode 100644 index 0000000000..f3862661e7 --- /dev/null +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfig.java @@ -0,0 +1,29 @@ +/* + * Copyright 2021-2023 Aklivity Inc. 
+ * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.internal.config; + +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; + +public class MqttOptionsConfig extends OptionsConfig +{ + public final MqttAuthorizationConfig authorization; + + public MqttOptionsConfig( + MqttAuthorizationConfig authorization) + { + this.authorization = authorization; + } +} diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java new file mode 100644 index 0000000000..d913fa7c3a --- /dev/null +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java @@ -0,0 +1,150 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.internal.config; + +import java.util.ArrayList; +import java.util.List; + +import jakarta.json.Json; +import jakarta.json.JsonObject; +import jakarta.json.JsonObjectBuilder; +import jakarta.json.bind.adapter.JsonbAdapter; + +import io.aklivity.zilla.runtime.binding.mqtt.internal.MqttBinding; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttAuthorizationConfig.MqttCredentialsConfig; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttAuthorizationConfig.MqttPatternConfig; +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; +import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; + +public class MqttOptionsConfigAdapter implements OptionsConfigAdapterSpi, JsonbAdapter +{ + private static final String AUTHORIZATION_NAME = "authorization"; + private static final String AUTHORIZATION_CREDENTIALS_NAME = "credentials"; + private static final String AUTHORIZATION_CREDENTIALS_CONNECT_NAME = "connect"; + private static final String AUTHORIZATION_CREDENTIALS_USERNAME_NAME = "username"; + private static final String AUTHORIZATION_CREDENTIALS_PASSWORD_NAME = "password"; + + @Override + public Kind kind() + { + return Kind.BINDING; + } + + @Override + public String type() + { + return MqttBinding.NAME; + } + + @Override + public JsonObject adaptToJson( + OptionsConfig options) + { + MqttOptionsConfig mqttOptions = (MqttOptionsConfig) options; + + JsonObjectBuilder object = Json.createObjectBuilder(); + + MqttAuthorizationConfig mqttAuthorization = mqttOptions.authorization; + if (mqttAuthorization != null) + { + JsonObjectBuilder authorizations = Json.createObjectBuilder(); + + JsonObjectBuilder authorization = Json.createObjectBuilder(); + + MqttCredentialsConfig mqttCredentials = mqttAuthorization.credentials; + if (mqttCredentials != null) + { + 
JsonObjectBuilder credentials = Json.createObjectBuilder(); + + if (mqttCredentials.connect != null) + { + JsonObjectBuilder connect = Json.createObjectBuilder(); + + mqttCredentials.connect.forEach(p -> connect.add(p.property.name().toLowerCase(), p.pattern)); + + credentials.add(AUTHORIZATION_CREDENTIALS_CONNECT_NAME, connect); + } + + authorization.add(AUTHORIZATION_CREDENTIALS_NAME, credentials); + + authorizations.add(mqttAuthorization.name, authorization); + } + + object.add(AUTHORIZATION_NAME, authorizations); + } + + return object.build(); + } + + @Override + public OptionsConfig adaptFromJson( + JsonObject object) + { + MqttAuthorizationConfig newAuthorization = null; + + JsonObject authorizations = object.containsKey(AUTHORIZATION_NAME) + ? object.getJsonObject(AUTHORIZATION_NAME) + : null; + + if (authorizations != null) + { + for (String name : authorizations.keySet()) + { + JsonObject authorization = authorizations.getJsonObject(name); + + MqttCredentialsConfig newCredentials = null; + + JsonObject credentials = authorization.getJsonObject(AUTHORIZATION_CREDENTIALS_NAME); + + if (credentials != null) + { + List newConnect = + adaptPatternFromJson(credentials, AUTHORIZATION_CREDENTIALS_CONNECT_NAME); + + newCredentials = new MqttCredentialsConfig(newConnect); + } + + newAuthorization = new MqttAuthorizationConfig(name, newCredentials); + } + } + + return new MqttOptionsConfig(newAuthorization); + } + + private List adaptPatternFromJson( + JsonObject object, + String property) + { + List newPatterns = null; + if (object.containsKey(property)) + { + newPatterns = new ArrayList<>(); + + JsonObject patterns = object.getJsonObject(property); + for (String name : patterns.keySet()) + { + name = name.toLowerCase(); + if (name.equals(AUTHORIZATION_CREDENTIALS_USERNAME_NAME) || + name.equals(AUTHORIZATION_CREDENTIALS_PASSWORD_NAME)) + { + String pattern = patterns.getString(name); + newPatterns.add(new 
MqttPatternConfig(MqttAuthorizationConfig.MqttConnectProperty.ofName(name), pattern)); + } + } + } + return newPatterns; + } +} diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 8039de7d7f..91b326a2dd 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.binding.mqtt.internal.stream; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.BAD_AUTHENTICATION_METHOD; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.BAD_USER_NAME_OR_PASSWORD; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.CLIENT_IDENTIFIER_NOT_VALID; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.DISCONNECT_WITH_WILL_MESSAGE; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.KEEP_ALIVE_TIMEOUT; @@ -82,15 +83,14 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.LongFunction; import java.util.function.LongSupplier; import java.util.function.LongUnaryOperator; import java.util.function.Supplier; +import java.util.function.ToLongFunction; import java.util.stream.Collectors; -import jakarta.json.Json; -import jakarta.json.JsonBuilderFactory; - import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; import org.agrona.collections.Int2IntHashMap; @@ -103,7 +103,9 @@ import io.aklivity.zilla.runtime.binding.mqtt.internal.MqttBinding; import 
io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration; import io.aklivity.zilla.runtime.binding.mqtt.internal.MqttValidator; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttAuthorizationConfig.MqttConnectProperty; import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttBindingConfig; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttOptionsConfig; import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttRouteConfig; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Flyweight; @@ -160,6 +162,7 @@ import io.aklivity.zilla.runtime.engine.buffer.BufferPool; import io.aklivity.zilla.runtime.engine.concurrent.Signaler; import io.aklivity.zilla.runtime.engine.config.BindingConfig; +import io.aklivity.zilla.runtime.engine.guard.GuardHandler; public final class MqttServerFactory implements MqttStreamFactory { @@ -216,8 +219,6 @@ public final class MqttServerFactory implements MqttStreamFactory private static final String16FW NULL_STRING = new String16FW((String) null); public static final String SHARED_SUBSCRIPTION_LITERAL = "$share"; - private final JsonBuilderFactory json = Json.createBuilderFactory(new HashMap<>()); - private final BeginFW beginRO = new BeginFW(); private final DataFW dataRO = new DataFW(); private final EndFW endRO = new EndFW(); @@ -353,6 +354,7 @@ public final class MqttServerFactory implements MqttStreamFactory private final LongSupplier supplyTraceId; private final LongSupplier supplyBudgetId; private final LongFunction supplyDebitor; + private final LongFunction supplyGuard; private final Long2ObjectHashMap bindings; private final int mqttTypeId; @@ -404,6 +406,7 @@ public MqttServerFactory( this.supplyReplyId = context::supplyReplyId; this.supplyBudgetId = context::supplyBudgetId; this.supplyTraceId = context::supplyTraceId; + this.supplyGuard = context::supplyGuard; this.bindings = new 
Long2ObjectHashMap<>(); this.mqttTypeId = context.supplyTypeId(MqttBinding.NAME); this.publishTimeoutMillis = SECONDS.toMillis(config.publishTimeout()); @@ -468,6 +471,10 @@ public MessageConsumer newStream( final long budgetId = supplyBudgetId.getAsLong(); newStream = new MqttServer( + binding.credentials(), + binding.authField(), + binding.options, + binding.resolveId, sender, originId, routedId, @@ -1210,6 +1217,9 @@ private final class MqttServer private final Int2ObjectHashMap topicAliases; private final Int2IntHashMap subscribePacketIds; private final Object2IntHashMap unsubscribePacketIds; + private final GuardHandler guard; + private final Function credentials; + private final MqttConnectProperty authField; private MqttSessionStream sessionStream; @@ -1258,8 +1268,13 @@ private final class MqttServer private int propertyMask = 0; private int state; + private long sessionId; private MqttServer( + Function credentials, + MqttConnectProperty authField, + MqttOptionsConfig options, + ToLongFunction resolveId, MessageConsumer network, long originId, long routedId, @@ -1281,6 +1296,9 @@ private MqttServer( this.topicAliases = new Int2ObjectHashMap<>(); this.subscribePacketIds = new Int2IntHashMap(-1); this.unsubscribePacketIds = new Object2IntHashMap<>(-1); + this.guard = resolveGuard(options, resolveId); + this.credentials = credentials; + this.authField = authField; } private void onNetwork( @@ -1394,7 +1412,7 @@ private void onNetworkEnd( { state = MqttState.closeInitial(state); - cleanupStreamsUsingAbort(traceId, authorization); + cleanupStreamsUsingAbort(traceId); doNetworkEnd(traceId, authorization); @@ -1506,7 +1524,7 @@ private void onKeepAliveTimeoutSignal( final MqttEndExFW.Builder builder = mqttEndExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) .typeId(mqttTypeId) .reasonCode(r -> r.set(MqttEndReasonCode.KEEP_ALIVE_EXPIRY)); - sessionStream.doSessionAppEnd(traceId, authorization, builder.build()); + sessionStream.doSessionAppEnd(traceId, 
builder.build()); } onDecodeError(traceId, authorization, KEEP_ALIVE_TIMEOUT); decoder = decodeIgnoreAll; @@ -1527,7 +1545,7 @@ private void onConnectTimeoutSignal( final long now = System.currentTimeMillis(); if (now >= connectTimeoutAt) { - cleanupStreamsUsingAbort(traceId, authorization); + cleanupStreamsUsingAbort(traceId); doNetworkEnd(traceId, authorization); decoder = decodeIgnoreAll; } @@ -1675,9 +1693,43 @@ else if (length > MAXIMUM_CLIENT_ID_LENGTH) serverDefinedKeepAlive = keepAlive != connect.keepAlive(); keepAliveTimeout = Math.round(TimeUnit.SECONDS.toMillis(keepAlive) * 1.5); doSignalKeepAliveTimeout(); + + long sessionAuth = authorization; + if (guard != null) + { + String authField = null; + if (this.authField.equals(MqttConnectProperty.USERNAME)) + { + authField = payload.username != null ? payload.username.asString() : null; + } + else if (this.authField.equals(MqttConnectProperty.PASSWORD)) + { + authField = payload.password != null ? + payload.password.bytes().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)) : null; + } + + final String credentialsMatch = credentials.apply(authField); + + if (credentialsMatch != null) + { + sessionAuth = guard.reauthorize(initialId, credentialsMatch); + } + } + + final MqttBindingConfig binding = bindings.get(routedId); + + final MqttRouteConfig resolved = binding != null ? 
binding.resolve(sessionAuth) : null; + + if (resolved == null) + { + reasonCode = BAD_USER_NAME_OR_PASSWORD; + break decode; + } + + this.sessionId = sessionAuth; if (session) { - resolveSession(traceId, authorization, reasonCode, connect, payload); + resolveSession(traceId, sessionAuth, resolved.id, connect, payload); } else { @@ -1690,7 +1742,14 @@ else if (length > MAXIMUM_CLIENT_ID_LENGTH) decoder = decodePacketType; } - if (reasonCode != SUCCESS) + if (reasonCode == BAD_USER_NAME_OR_PASSWORD) + { + doCancelConnectTimeout(); + doNetworkEnd(traceId, authorization); + decoder = decodeIgnoreAll; + progress = connect.limit(); + } + else if (reasonCode != SUCCESS) { doCancelConnectTimeout(); doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, null); @@ -1704,58 +1763,51 @@ else if (length > MAXIMUM_CLIENT_ID_LENGTH) private void resolveSession( long traceId, long authorization, - int reasonCode, + long resolvedId, MqttConnectFW connect, MqttConnectPayload payload) { final int flags = connect.flags(); - final MqttBindingConfig binding = bindings.get(routedId); - final MqttRouteConfig resolved = binding != null ? 
binding.resolve(authorization, MqttCapabilities.SESSION) : null; - - if (resolved != null) - { - final long resolvedId = resolved.id; - final boolean willFlagSet = isSetWillFlag(flags); + final boolean willFlagSet = isSetWillFlag(flags); - final MqttBeginExFW.Builder builder = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) - .typeId(mqttTypeId) - .session(sessionBuilder -> - { - sessionBuilder.clientId(clientId); - sessionBuilder.expiry(sessionExpiryInterval); - sessionBuilder.serverReference(serverReference); - if (willFlagSet) - { - final int willFlags = decodeWillFlags(flags); - final int willQos = decodeWillQos(flags); - final MqttMessageFW.Builder willMessageBuilder = - mqttMessageFW.wrap(willMessageBuffer, 0, willMessageBuffer.capacity()) - .topic(payload.willTopic) - .delay(payload.willDelay) - .qos(willQos) - .flags(willFlags) - .expiryInterval(payload.expiryInterval) - .contentType(payload.contentType) - .format(f -> f.set(payload.payloadFormat)) - .responseTopic(payload.responseTopic) - .correlation(c -> c.bytes(payload.correlationData)); - - final Array32FW userProperties = willUserPropertiesRW.build(); - userProperties.forEach( - c -> willMessageBuilder.propertiesItem(p -> p.key(c.key()).value(c.value()))); - willMessageBuilder.payload(p -> p.bytes(payload.willPayload.bytes())); - sessionBuilder.will(willMessageBuilder.build()); - } - }); - - if (sessionStream == null) + final MqttBeginExFW.Builder builder = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) + .typeId(mqttTypeId) + .session(sessionBuilder -> { - sessionStream = new MqttSessionStream(originId, resolvedId, 0); - } + sessionBuilder.clientId(clientId); + sessionBuilder.expiry(sessionExpiryInterval); + sessionBuilder.serverReference(serverReference); + if (willFlagSet) + { + final int willFlags = decodeWillFlags(flags); + final int willQos = decodeWillQos(flags); + final MqttMessageFW.Builder willMessageBuilder = + 
mqttMessageFW.wrap(willMessageBuffer, 0, willMessageBuffer.capacity()) + .topic(payload.willTopic) + .delay(payload.willDelay) + .qos(willQos) + .flags(willFlags) + .expiryInterval(payload.expiryInterval) + .contentType(payload.contentType) + .format(f -> f.set(payload.payloadFormat)) + .responseTopic(payload.responseTopic) + .correlation(c -> c.bytes(payload.correlationData)); + + final Array32FW userProperties = willUserPropertiesRW.build(); + userProperties.forEach( + c -> willMessageBuilder.propertiesItem(p -> p.key(c.key()).value(c.value()))); + willMessageBuilder.payload(p -> p.bytes(payload.willPayload.bytes())); + sessionBuilder.will(willMessageBuilder.build()); + } + }); - sessionStream.doSessionBegin(traceId, authorization, affinity, builder.build()); + if (sessionStream == null) + { + sessionStream = new MqttSessionStream(originId, resolvedId, 0); } + + sessionStream.doSessionBegin(traceId, affinity, builder.build()); } private MqttPublishStream resolvePublishStream( @@ -1767,7 +1819,7 @@ private MqttPublishStream resolvePublishStream( final MqttBindingConfig binding = bindings.get(routedId); final MqttRouteConfig resolved = binding != null ? 
- binding.resolve(authorization, topic, MqttCapabilities.PUBLISH_ONLY) : null; + binding.resolve(sessionId, topic, MqttCapabilities.PUBLISH_ONLY) : null; if (resolved != null) { @@ -1775,7 +1827,7 @@ private MqttPublishStream resolvePublishStream( final int topicKey = topicKey(topic); stream = publishStreams.computeIfAbsent(topicKey, s -> new MqttPublishStream(routedId, resolvedId, topic)); - stream.doPublishBegin(traceId, authorization, affinity); + stream.doPublishBegin(traceId, affinity); } else { @@ -1814,7 +1866,7 @@ private void onDecodePublish( final MqttDataExFW dataEx = builder.build(); if (stream != null) { - stream.doPublishData(traceId, authorization, reserved, payload, dataEx); + stream.doPublishData(traceId, reserved, payload, dataEx); } doSignalKeepAliveTimeout(); } @@ -1952,7 +2004,7 @@ private void onDecodeSubscribe( //TODO: is this correct? What is this? int reserved = payloadSize; - sessionStream.doSessionData(traceId, authorization, reserved, sessionState); + sessionStream.doSessionData(traceId, reserved, sessionState); } else { @@ -1975,7 +2027,7 @@ private void openSubscribeStreams( { final MqttBindingConfig binding = bindings.get(routedId); final MqttRouteConfig resolved = - binding != null ? binding.resolve(authorization, subscription.filter, MqttCapabilities.SUBSCRIBE_ONLY) : null; + binding != null ? binding.resolve(sessionId, subscription.filter, MqttCapabilities.SUBSCRIBE_ONLY) : null; if (resolved != null) { @@ -1990,7 +2042,7 @@ private void openSubscribeStreams( MqttSubscribeStream stream = subscribeStreams.computeIfAbsent(subscribeKey, s -> new MqttSubscribeStream(routedId, key, adminSubscribe)); stream.packetId = packetId; - stream.doSubscribeBeginOrFlush(traceId, authorization, affinity, subscribeKey, value); + stream.doSubscribeBeginOrFlush(traceId, affinity, subscribeKey, value); }); } @@ -2094,7 +2146,7 @@ private void sendNewSessionStateForUnsubscribe( //TODO: is this correct? What is this? 
int reserved = payloadSize; - sessionStream.doSessionData(traceId, authorization, reserved, sessionState); + sessionStream.doSessionData(traceId, reserved, sessionState); } private void sendUnsuback( @@ -2116,7 +2168,7 @@ private void sendUnsuback( { final MqttBindingConfig binding = bindings.get(routedId); final MqttRouteConfig resolved = - binding != null ? binding.resolve(authorization, topicFilter, MqttCapabilities.SUBSCRIBE_ONLY) : null; + binding != null ? binding.resolve(sessionId, topicFilter, MqttCapabilities.SUBSCRIBE_ONLY) : null; final int subscribeKey = subscribeKey(clientId.asString(), resolved.id); final MqttSubscribeStream stream = subscribeStreams.get(subscribeKey); @@ -2133,7 +2185,7 @@ private void sendUnsuback( } filtersByStream.forEach( - (stream, filters) -> stream.doSubscribeFlushOrEnd(traceId, authorization, filters)); + (stream, filters) -> stream.doSubscribeFlushOrEnd(traceId, filters)); if (!adminUnsubscribe) { final OctetsFW encodePayload = octetsRO.wrap(encodeBuffer, encodeOffset, encodeProgress); @@ -2164,7 +2216,7 @@ private void onDecodeDisconnect( disconnect.reasonCode() == DISCONNECT_WITH_WILL_MESSAGE ? 
MqttEndReasonCode.DISCONNECT_WITH_WILL : MqttEndReasonCode.DISCONNECT)); - sessionStream.doSessionAppEnd(traceId, authorization, builder.build()); + sessionStream.doSessionAppEnd(traceId, builder.build()); } closeStreams(traceId, authorization); doNetworkEnd(traceId, authorization); @@ -2181,7 +2233,7 @@ private void onDecodeError( closeStreams(traceId, authorization); break; default: - cleanupStreamsUsingAbort(traceId, authorization); + cleanupStreamsUsingAbort(traceId); break; } if (connected) @@ -2755,7 +2807,7 @@ private void decodeNetwork( if (MqttState.initialClosing(state)) { state = MqttState.closeInitial(state); - cleanupStreamsUsingAbort(traceId, authorization); + cleanupStreamsUsingAbort(traceId); doNetworkEnd(traceId, authorization); } } @@ -2770,21 +2822,20 @@ private void cleanupNetwork( long traceId, long authorization) { - cleanupStreamsUsingAbort(traceId, authorization); + cleanupStreamsUsingAbort(traceId); doNetworkReset(traceId, authorization); doNetworkAbort(traceId, authorization); } private void cleanupStreamsUsingAbort( - long traceId, - long authorization) + long traceId) { - publishStreams.values().forEach(s -> s.cleanupAbort(traceId, authorization)); - subscribeStreams.values().forEach(s -> s.cleanupAbort(traceId, authorization)); + publishStreams.values().forEach(s -> s.cleanupAbort(traceId)); + subscribeStreams.values().forEach(s -> s.cleanupAbort(traceId)); if (sessionStream != null) { - sessionStream.cleanupAbort(traceId, authorization); + sessionStream.cleanupAbort(traceId); } } @@ -2792,11 +2843,11 @@ private void closeStreams( long traceId, long authorization) { - publishStreams.values().forEach(s -> s.doPublishAppEnd(traceId, authorization)); - subscribeStreams.values().forEach(s -> s.doSubscribeAppEnd(traceId, authorization)); + publishStreams.values().forEach(s -> s.doPublishAppEnd(traceId)); + subscribeStreams.values().forEach(s -> s.doSubscribeAppEnd(traceId)); if (sessionStream != null) { - sessionStream.cleanupEnd(traceId, 
authorization); + sessionStream.cleanupEnd(traceId); } } @@ -3045,7 +3096,7 @@ private void onSessionWindow( if (MqttState.initialClosing(state) && !MqttState.initialClosed(state)) { - doSessionAppEnd(traceId, authorization, EMPTY_OCTETS); + doSessionAppEnd(traceId, EMPTY_OCTETS); } } @@ -3078,7 +3129,7 @@ private void onSessionReset( setInitialClosed(); decodeNetwork(traceId); - cleanupAbort(traceId, authorization); + cleanupAbort(traceId); } private void onSessionSignal( @@ -3101,7 +3152,7 @@ private void onSessionBegin( final long traceId = begin.traceId(); final long authorization = begin.authorization(); - doSessionWindow(traceId, authorization, encodeSlotOffset, encodeBudgetMax); + doSessionWindow(traceId, encodeSlotOffset, encodeBudgetMax); } private void onSessionData( @@ -3127,7 +3178,7 @@ private void onSessionData( if (replySeq > replyAck + replyMax) { - doSessionReset(traceId, authorization); + doSessionReset(traceId); doNetworkAbort(traceId, authorization); } else @@ -3146,7 +3197,7 @@ private void onSessionData( { if (cleanStart) { - doSessionData(traceId, authorization, 0, emptyRO); + doSessionData(traceId, 0, emptyRO); } else { @@ -3225,7 +3276,7 @@ private void onSessionData( } } - doSessionWindow(traceId, authorization, encodeSlotOffset, encodeBudgetMax); + doSessionWindow(traceId, encodeSlotOffset, encodeBudgetMax); } } @@ -3247,14 +3298,12 @@ private void onSessionAbort( setReplyClosed(); final long traceId = abort.traceId(); - final long authorization = abort.authorization(); - cleanupAbort(traceId, authorization); + cleanupAbort(traceId); } private void doSessionBegin( long traceId, - long authorization, long affinity, Flyweight beginEx) { @@ -3265,15 +3314,14 @@ private void doSessionBegin( application = newStream(this::onSession, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, beginEx); + traceId, sessionId, affinity, beginEx); - doSessionWindow(traceId, authorization, 0, 0); + 
doSessionWindow(traceId, 0, 0); } } private void doSessionData( long traceId, - long authorization, int reserved, Flyweight sessionState) { @@ -3288,7 +3336,7 @@ private void doSessionData( if (!MqttState.closed(state)) { doData(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_OCTETS); + traceId, sessionId, budgetId, reserved, buffer, offset, length, EMPTY_OCTETS); initialSeq += reserved; assert initialSeq <= initialAck + initialMax; @@ -3296,36 +3344,32 @@ private void doSessionData( } private void cleanupAbort( - long traceId, - long authorization) + long traceId) { - doSessionAbort(traceId, authorization); - doSessionReset(traceId, authorization); + doSessionAbort(traceId); + doSessionReset(traceId); } private void cleanupEnd( - long traceId, - long authorization) + long traceId) { - doSessionAppEnd(traceId, authorization, EMPTY_OCTETS); + doSessionAppEnd(traceId, EMPTY_OCTETS); } private void doSessionAbort( - long traceId, - long authorization) + long traceId) { if (!MqttState.initialClosed(state)) { setInitialClosed(); doAbort(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, EMPTY_OCTETS); + traceId, sessionId, EMPTY_OCTETS); } } private void doSessionAppEnd( long traceId, - long authorization, Flyweight extension) { if (MqttState.initialOpening(state) && !MqttState.initialClosed(state)) @@ -3333,14 +3377,13 @@ private void doSessionAppEnd( setReplyClosed(); doEnd(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, extension); + traceId, sessionId, extension); sessionStream = null; } } private void doSessionWindow( long traceId, - long authorization, int minReplyNoAck, int minReplyMax) { @@ -3356,21 +3399,20 @@ private void doSessionWindow( replyMax = minReplyMax; doWindow(application, originId, routedId, replyId, replySeq, replyAck, replyMax, - 
traceId, authorization, encodeBudgetId, PUBLISH_FRAMING); + traceId, sessionId, encodeBudgetId, PUBLISH_FRAMING); } } } private void doSessionReset( - long traceId, - long authorization) + long traceId) { if (!MqttState.replyClosed(state)) { setReplyClosed(); doReset(application, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, EMPTY_OCTETS); + traceId, sessionId, EMPTY_OCTETS); } } @@ -3457,7 +3499,6 @@ private class MqttPublishStream private void doPublishBegin( long traceId, - long authorization, long affinity) { if (!MqttState.initialOpening(state)) @@ -3476,16 +3517,15 @@ private void doPublishBegin( .build(); application = newStream(this::onPublish, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, beginEx); + traceId, sessionId, affinity, beginEx); doSignalPublishExpiration(); - doPublishWindow(traceId, authorization, 0, 0); + doPublishWindow(traceId, 0, 0); } } private void doPublishData( long traceId, - long authorization, int reserved, OctetsFW payload, Flyweight extension) @@ -3499,7 +3539,7 @@ private void doPublishData( assert reserved >= length + initialPad; doData(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, reserved, buffer, offset, length, extension); + traceId, sessionId, budgetId, reserved, buffer, offset, length, extension); initialSeq += reserved; assert initialSeq <= initialAck + initialMax; @@ -3508,15 +3548,14 @@ private void doPublishData( } private void doPublishAbort( - long traceId, - long authorization) + long traceId) { if (!MqttState.initialClosed(state)) { setPublishNetClosed(); doAbort(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, EMPTY_OCTETS); + traceId, sessionId, EMPTY_OCTETS); } } @@ -3565,10 +3604,8 @@ private void onPublishBegin( state = MqttState.openingReply(state); final long traceId = begin.traceId(); - 
final long authorization = begin.authorization(); - - doPublishWindow(traceId, authorization, encodeSlotOffset, encodeBudgetMax); + doPublishWindow(traceId, encodeSlotOffset, encodeBudgetMax); } private void onPublishData( @@ -3591,13 +3628,13 @@ private void onPublishData( if (replySeq > replyAck + replyMax) { - doPublishReset(traceId, authorization); + doPublishReset(traceId); doNetworkAbort(traceId, authorization); } else { droppedHandler.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); - doPublishWindow(traceId, authorization, encodeSlotOffset, encodeBudgetMax); + doPublishWindow(traceId, encodeSlotOffset, encodeBudgetMax); } } @@ -3613,9 +3650,8 @@ private void onPublishAbort( setPublishAppClosed(); final long traceId = abort.traceId(); - final long authorization = abort.authorization(); - cleanupAbort(traceId, authorization); + cleanupAbort(traceId); } private void onPublishWindow( @@ -3651,7 +3687,7 @@ private void onPublishWindow( if (MqttState.initialClosing(state)) { - doPublishAppEnd(traceId, authorization); + doPublishAppEnd(traceId); } else if (decodePublisherKey == topicKey) { @@ -3674,7 +3710,7 @@ private void onPublishReset( } decodeNetwork(traceId); - cleanupAbort(traceId, authorization); + cleanupAbort(traceId); } private void onPublishSignal( @@ -3696,12 +3732,11 @@ private void onPublishExpiredSignal( SignalFW signal) { final long traceId = signal.traceId(); - final long authorization = signal.authorization(); final long now = System.currentTimeMillis(); if (now >= publishExpiresAt) { - doPublishAppEnd(traceId, authorization); + doPublishAppEnd(traceId); } else { @@ -3732,7 +3767,6 @@ private void doCancelPublishExpiration() private void doPublishWindow( long traceId, - long authorization, int minReplyNoAck, int minReplyMax) { @@ -3750,33 +3784,31 @@ private void doPublishWindow( state = MqttState.openReply(state); doWindow(application, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, 
encodeBudgetId, PUBLISH_FRAMING); + traceId, sessionId, encodeBudgetId, PUBLISH_FRAMING); } } } private void doPublishReset( - long traceId, - long authorization) + long traceId) { if (!MqttState.replyClosed(state)) { setPublishAppClosed(); doReset(application, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, EMPTY_OCTETS); + traceId, sessionId, EMPTY_OCTETS); } } private void doPublishAppEnd( - long traceId, - long authorization) + long traceId) { if (!MqttState.initialClosed(state)) { doCancelPublishExpiration(); doEnd(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, EMPTY_OCTETS); + traceId, sessionId, EMPTY_OCTETS); } } @@ -3812,11 +3844,10 @@ private void setPublishAppClosed() } private void cleanupAbort( - long traceId, - long authorization) + long traceId) { - doPublishAbort(traceId, authorization); - doPublishReset(traceId, authorization); + doPublishAbort(traceId); + doPublishReset(traceId); doCancelPublishExpiration(); } } @@ -3869,7 +3900,6 @@ private Optional getSubscriptionByFilter(String filter) private void doSubscribeBeginOrFlush( long traceId, - long authorization, long affinity, int clientKey, List subscriptions) @@ -3879,17 +3909,16 @@ private void doSubscribeBeginOrFlush( if (!MqttState.initialOpening(state)) { - doSubscribeBegin(traceId, authorization, affinity); + doSubscribeBegin(traceId, affinity); } else { - doSubscribeFlush(traceId, authorization, 0, subscriptions); + doSubscribeFlush(traceId, 0, subscriptions); } } private void doSubscribeBegin( long traceId, - long authorization, long affinity) { assert state == 0; @@ -3912,19 +3941,18 @@ private void doSubscribeBegin( .build(); application = newStream(this::onSubscribe, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, beginEx); + traceId, sessionId, affinity, beginEx); - doSubscribeWindow(traceId, authorization, 0, 0); + 
doSubscribeWindow(traceId, 0, 0); } private void doSubscribeFlush( long traceId, - long authorization, int reserved, List newSubscriptions) { doFlush(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, 0L, reserved, + traceId, sessionId, 0L, reserved, ex -> ex.set((b, o, l) -> mqttFlushExRW.wrap(b, o, l) .typeId(mqttTypeId) .subscribe(subscribeBuilder -> @@ -3949,7 +3977,7 @@ private void doSubscribeFlush( subscriptionPayload[i] = SUCCESS; } - doEncodeSuback(traceId, authorization, packetId, subscriptionPayload); + doEncodeSuback(traceId, sessionId, packetId, subscriptionPayload); } initialSeq += reserved; @@ -3958,7 +3986,6 @@ private void doSubscribeFlush( private void doSubscribeFlushOrEnd( long traceId, - long authorization, List unsubscribedPatterns) { this.subscriptions.removeIf(subscription -> unsubscribedPatterns.contains(subscription.filter)); @@ -3970,25 +3997,24 @@ private void doSubscribeFlushOrEnd( { if (subscriptions.isEmpty()) { - doSubscribeAppEnd(traceId, authorization); + doSubscribeAppEnd(traceId); } else { - doSubscribeFlush(traceId, authorization, 0, null); + doSubscribeFlush(traceId, 0, null); } } } private void doSubscribeAbort( - long traceId, - long authorization) + long traceId) { if (!MqttState.initialClosed(state)) { setNetClosed(); doAbort(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, EMPTY_OCTETS); + traceId, sessionId, EMPTY_OCTETS); } } @@ -4055,12 +4081,12 @@ private void onSubscribeBegin( if (!acknowledged) { - doSubscribeWindow(traceId, authorization, encodeSlotOffset, encodeBudgetMax); + doSubscribeWindow(traceId, encodeSlotOffset, encodeBudgetMax); acknowledged = true; } else { - doSubscribeWindow(traceId, authorization, 0, replyMax); + doSubscribeWindow(traceId, 0, replyMax); } } @@ -4088,7 +4114,7 @@ private void onSubscribeData( if (replySeq > replyAck + replyMax) { - doSubscribeReset(traceId, authorization); + 
doSubscribeReset(traceId); doNetworkAbort(traceId, authorization); } else @@ -4101,7 +4127,7 @@ private void onSubscribeData( { droppedHandler.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); } - doSubscribeWindow(traceId, authorization, encodeSlotOffset, encodeBudgetMax); + doSubscribeWindow(traceId, encodeSlotOffset, encodeBudgetMax); } } @@ -4121,7 +4147,7 @@ private void onSubscribeReset( // } decodeNetwork(traceId); - cleanupAbort(traceId, authorization); + cleanupAbort(traceId); } private void onSubscribeWindow( @@ -4194,7 +4220,7 @@ private void onSubscribeWindow( if (MqttState.initialClosing(state) && !MqttState.initialClosed(state)) { - doSubscribeAppEnd(traceId, authorization); + doSubscribeAppEnd(traceId); } } @@ -4218,21 +4244,19 @@ private void onSubscribeAbort( final long traceId = abort.traceId(); final long authorization = abort.authorization(); - cleanupAbort(traceId, authorization); + cleanupAbort(traceId); } private void cleanupAbort( - long traceId, - long authorization) + long traceId) { - doSubscribeAbort(traceId, authorization); - doSubscribeReset(traceId, authorization); + doSubscribeAbort(traceId); + doSubscribeReset(traceId); } private void doSubscribeWindow( long traceId, - long authorization, int minReplyNoAck, int minReplyMax) { @@ -4248,13 +4272,12 @@ private void doSubscribeWindow( state = MqttState.openReply(state); doWindow(application, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, encodeBudgetId, PUBLISH_FRAMING); + traceId, sessionId, encodeBudgetId, PUBLISH_FRAMING); } } private void doSubscribeReset( - long traceId, - long authorization) + long traceId) { if (!MqttState.replyClosed(state)) { @@ -4264,36 +4287,17 @@ private void doSubscribeReset( setSubscribeAppClosed(); doReset(application, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, EMPTY_OCTETS); + traceId, sessionId, EMPTY_OCTETS); } } private void doSubscribeAppEnd( - long 
traceId, - long authorization) + long traceId) { if (MqttState.initialOpening(state) && !MqttState.initialClosed(state)) { doEnd(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, EMPTY_OCTETS); - } - } - - private void setSubscribeNetClosed() - { - assert !MqttState.initialClosed(state); - - state = MqttState.closeInitial(state); - - if (debitorIndex != NO_DEBITOR_INDEX) - { - debitor.release(debitorIndex, initialId); - debitorIndex = NO_DEBITOR_INDEX; - } - - if (MqttState.closed(state)) - { - subscribeStreams.remove(clientKey); + traceId, sessionId, EMPTY_OCTETS); } } @@ -4795,5 +4799,21 @@ private int calculatePublishApplicationFlags( return flags; } } + + private GuardHandler resolveGuard( + MqttOptionsConfig options, + ToLongFunction resolveId) + { + GuardHandler guard = null; + + if (options != null && + options.authorization != null) + { + long guardId = resolveId.applyAsLong(options.authorization.name); + guard = supplyGuard.apply(guardId); + } + + return guard; + } } diff --git a/incubator/binding-mqtt/src/main/moditect/module-info.java b/incubator/binding-mqtt/src/main/moditect/module-info.java index 2a88753954..d7e5310ba2 100644 --- a/incubator/binding-mqtt/src/main/moditect/module-info.java +++ b/incubator/binding-mqtt/src/main/moditect/module-info.java @@ -20,6 +20,9 @@ provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.mqtt.internal.MqttBindingFactorySpi; + provides io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi + with io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttOptionsConfigAdapter; + provides io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi with io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttConditionConfigAdapter; } diff --git a/incubator/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi 
b/incubator/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi new file mode 100644 index 0000000000..9ef2b8ec8c --- /dev/null +++ b/incubator/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi @@ -0,0 +1 @@ +io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttOptionsConfigAdapter diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java new file mode 100644 index 0000000000..85a208afd7 --- /dev/null +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java @@ -0,0 +1,115 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.mqtt.internal.config; + +import static java.util.Collections.singletonList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +import jakarta.json.bind.Jsonb; +import jakarta.json.bind.JsonbBuilder; +import jakarta.json.bind.JsonbConfig; + +import org.junit.Before; +import org.junit.Test; + +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttAuthorizationConfig.MqttCredentialsConfig; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttAuthorizationConfig.MqttPatternConfig; + +public class MqttOptionsConfigAdapterTest +{ + private Jsonb jsonb; + + @Before + public void initJson() + { + JsonbConfig config = new JsonbConfig() + .withAdapters(new MqttOptionsConfigAdapter()); + jsonb = JsonbBuilder.create(config); + } + + @Test + public void shouldReadOptions() + { + String text = + "{" + + "\"authorization\":" + + "{" + + "\"test0\":" + + "{" + + "\"credentials\":" + + "{" + + "\"connect\":" + + "{" + + "\"username\":\"Bearer {credentials}\"" + + "}" + + "}" + + "}" + + "}," + + "}"; + + MqttOptionsConfig options = jsonb.fromJson(text, MqttOptionsConfig.class); + + assertThat(options, not(nullValue())); + assertThat(options.authorization, not(nullValue())); + assertThat(options.authorization.name, equalTo("test0")); + assertThat(options.authorization.credentials, not(nullValue())); + assertThat(options.authorization.credentials.connect, not(nullValue())); + assertThat(options.authorization.credentials.connect, hasSize(1)); + assertThat(options.authorization.credentials.connect.get(0), not(nullValue())); + assertThat(options.authorization.credentials.connect.get(0).property, + equalTo(MqttAuthorizationConfig.MqttConnectProperty.USERNAME)); + assertThat(options.authorization.credentials.connect.get(0).pattern, 
+ equalTo("Bearer {credentials}")); + + } + + @Test + public void shouldWriteOptions() + { + MqttOptionsConfig options = new MqttOptionsConfig( + new MqttAuthorizationConfig( + "test0", + new MqttCredentialsConfig( + singletonList(new MqttPatternConfig( + MqttAuthorizationConfig.MqttConnectProperty.USERNAME, + "Bearer {credentials}"))))); + + String text = jsonb.toJson(options); + + assertThat(text, not(nullValue())); + assertThat(text, equalTo( + "{" + + + "\"authorization\":" + + "{" + + "\"test0\":" + + "{" + + "\"credentials\":" + + "{" + + "\"connect\":" + + "{" + + "\"username\":\"Bearer {credentials}\"" + + "}" + + "}" + + "}" + + "}" + + "}")); + } +} diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java index 7c671c7208..dedc52e7bd 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java @@ -77,6 +77,63 @@ public void shouldConnect() throws Exception k3po.finish(); } + @Test + @Configuration("server.credentials.username.yaml") + @Specification({ + "${net}/connect.username.authentication.successful/client", + "${app}/connect.authorize.publish.one.message/server"}) + @Configure(name = SESSION_AVAILABLE_NAME, value = "false") + @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") + public void shouldAuthenticateUsernameAndConnect() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("server.credentials.username.yaml") + @Specification({ + 
"${net}/connect.username.authentication.failed/client"}) + @Configure(name = SESSION_AVAILABLE_NAME, value = "false") + @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") + public void shouldFailUsernameAuthentication() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("server.credentials.password.yaml") + @Specification({ + "${net}/connect.password.authentication.successful/client"}) + @Configure(name = SESSION_AVAILABLE_NAME, value = "false") + @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") + public void shouldAuthenticatePasswordAndConnect() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("server.credentials.password.yaml") + @Specification({ + "${net}/connect.password.authentication.failed/client"}) + @Configure(name = SESSION_AVAILABLE_NAME, value = "false") + @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") + public void shouldFailPasswordAuthentication() throws Exception + { + k3po.finish(); + } + @Test @Configuration("server.yaml") @Specification({ From 8f63639edf43804fbfb004ed8035f75df4269e4c Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 27 Jul 2023 11:39:33 -0700 Subject: [PATCH 005/115] Update CHANGELOG.md --- CHANGELOG.md | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 26cb95e83c..35cd8d74de 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,71 @@ # Changelog +## [Unreleased](https://github.com/aklivity/zilla/tree/HEAD) + +[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.51...HEAD) + +**Closed issues:** + +- Add guard support for MQTT binding [\#308](https://github.com/aklivity/zilla/issues/308) +- Implement retained feature for mqtt-kafka [\#289](https://github.com/aklivity/zilla/issues/289) + +## [0.9.51](https://github.com/aklivity/zilla/tree/0.9.51) (2023-07-27) + +[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.50...0.9.51) + +**Implemented enhancements:** + +- Enhance `tcp` binding to route by `port` [\#294](https://github.com/aklivity/zilla/issues/294) +- Integrate OpenTelemetry collectors by exporting local metrics over OTLP [\#112](https://github.com/aklivity/zilla/issues/112) + +**Closed issues:** + +- Add redirect, server reference support to mqtt binding [\#302](https://github.com/aklivity/zilla/issues/302) +- Add options to mqtt-kafa binding so we can change kafka topics [\#300](https://github.com/aklivity/zilla/issues/300) + +**Merged pull requests:** + +- Fix kafka cache cursor buffer copy [\#317](https://github.com/aklivity/zilla/pull/317) ([bmaidics](https://github.com/bmaidics)) +- README Formatting and wording changes [\#306](https://github.com/aklivity/zilla/pull/306) ([vordimous](https://github.com/vordimous)) +- Readme overhaul [\#305](https://github.com/aklivity/zilla/pull/305) ([llukyanov](https://github.com/llukyanov)) +- Support for tcp binding to route by port numbers [\#299](https://github.com/aklivity/zilla/pull/299) ([lukefallows](https://github.com/lukefallows)) +- Create OpenTelemetry exporter and refactor code [\#279](https://github.com/aklivity/zilla/pull/279) ([attilakreiner](https://github.com/attilakreiner)) + +## [0.9.50](https://github.com/aklivity/zilla/tree/0.9.50) (2023-07-14) + +[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.49...0.9.50) + +**Implemented enhancements:** + 
+- `kubernetes autoscaling` feature [\#189](https://github.com/aklivity/zilla/issues/189) + +**Closed issues:** + +- Update image base [\#291](https://github.com/aklivity/zilla/issues/291) + +**Merged pull requests:** + +- update Helm logo & details and clean up README [\#273](https://github.com/aklivity/zilla/pull/273) ([vordimous](https://github.com/vordimous)) + +## [0.9.49](https://github.com/aklivity/zilla/tree/0.9.49) (2023-06-28) + +[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.48...0.9.49) + +**Implemented enhancements:** + +- Kafka merged flush should support filter changes [\#259](https://github.com/aklivity/zilla/issues/259) + +**Closed issues:** + +- Null pointer when Headers are null [\#281](https://github.com/aklivity/zilla/issues/281) + +**Merged pull requests:** + +- Remove unnecessary cursor assignment [\#288](https://github.com/aklivity/zilla/pull/288) ([akrambek](https://github.com/akrambek)) +- Eliminate zilla.json warning if file not present [\#286](https://github.com/aklivity/zilla/pull/286) ([jfallows](https://github.com/jfallows)) +- Send kafka flush even if data frames were sent to notify client from HISTORICAL to LIVE transition [\#284](https://github.com/aklivity/zilla/pull/284) ([bmaidics](https://github.com/bmaidics)) +- KafkaMerged acknowledge flush frame [\#258](https://github.com/aklivity/zilla/pull/258) ([bmaidics](https://github.com/bmaidics)) + ## [0.9.48](https://github.com/aklivity/zilla/tree/0.9.48) (2023-06-23) [Full Changelog](https://github.com/aklivity/zilla/compare/0.9.47...0.9.48) From 80e20912380c23f62040dd01d50a0f0da3a56919 Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Fri, 28 Jul 2023 09:00:49 -0700 Subject: [PATCH 006/115] Support Kafka consumer groups (#262) --- .../command/log/internal/LoggableStream.java | 32 + .../kafka/internal/KafkaConfiguration.java | 77 + .../internal/config/KafkaBindingConfig.java | 13 +- .../internal/config/KafkaConditionConfig.java | 5 +- 
.../config/KafkaConditionConfigAdapter.java | 12 +- .../config/KafkaConditionMatcher.java | 18 +- .../internal/config/KafkaRouteConfig.java | 5 +- .../stream/KafkaCacheClientFactory.java | 3 + .../stream/KafkaCacheGroupFactory.java | 1037 ++++++ .../stream/KafkaCacheServerFactory.java | 3 + .../internal/stream/KafkaClientFactory.java | 9 + .../stream/KafkaClientGroupFactory.java | 3010 +++++++++++++++++ .../stream/KafkaClientSaslHandshaker.java | 4 +- .../binding-kafka/src/main/zilla/protocol.idl | 140 +- .../KafkaConditionConfigAdapterTest.java | 8 +- .../kafka/internal/stream/CacheGroupIT.java | 68 + .../kafka/internal/stream/ClientGroupIT.java | 133 + .../internal/stream/ClientGroupSaslIT.java | 63 + .../kafka/schema/kafka.schema.patch.json | 7 +- .../client.rpt | 32 + .../server.rpt | 36 + .../application/group/leader/client.rpt | 41 + .../application/group/leader/server.rpt | 43 + .../client.rpt | 69 + .../server.rpt | 68 + .../rebalance.protocol.highlander/client.rpt | 54 + .../rebalance.protocol.highlander/server.rpt | 57 + .../rebalance.protocol.unknown/client.rpt | 57 + .../rebalance.protocol.unknown/server.rpt | 45 + .../client.rpt | 41 + .../server.rpt | 35 + .../coordinator.not.available/client.rpt | 150 + .../coordinator.not.available/server.rpt | 140 + .../client.rpt | 176 + .../server.rpt | 153 + .../client.rpt | 195 ++ .../server.rpt | 175 + .../client.rpt | 132 + .../server.rpt | 123 + .../rebalance.protocol.highlander/client.rpt | 221 ++ .../rebalance.protocol.highlander/server.rpt | 212 ++ .../rebalance.protocol.unknown/client.rpt | 108 + .../rebalance.protocol.unknown/server.rpt | 97 + .../rebalance.sync.group/client.rpt | 154 + .../rebalance.sync.group/server.rpt | 144 + .../leader/client.rpt | 165 + .../leader/server.rpt | 157 + .../kafka/streams/application/GroupIT.java | 84 + .../kafka/streams/network/GroupIT.java | 110 + .../kafka/streams/network/GroupSaslIT.java | 49 + 50 files changed, 7955 insertions(+), 15 deletions(-) create mode 
100644 runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java create mode 100644 runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java create mode 100644 runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheGroupIT.java create mode 100644 runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java create mode 100644 runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupSaslIT.java create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.before.coordinator.response/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.before.coordinator.response/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt create mode 100644 
specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt create mode 100644 
specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt create mode 100644 specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java create mode 100644 
specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupIT.java create mode 100644 specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupSaslIT.java diff --git a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java index f857dd303d..c8a8689afb 100644 --- a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java +++ b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java @@ -87,6 +87,8 @@ import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaFetchDataExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaFetchFlushExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaFlushExFW; +import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaGroupBeginExFW; +import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaGroupDataExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaMergedBeginExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaMergedDataExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaMergedFlushExFW; @@ -890,6 +892,9 @@ private void onKafkaBeginEx( case KafkaBeginExFW.KIND_DESCRIBE: onKafkaDescribeBeginEx(offset, timestamp, kafkaBeginEx.describe()); break; + case KafkaBeginExFW.KIND_GROUP: + onKafkaGroupBeginEx(offset, timestamp, kafkaBeginEx.group()); + break; case KafkaBeginExFW.KIND_FETCH: onKafkaFetchBeginEx(offset, timestamp, kafkaBeginEx.fetch()); break; @@ -946,6 +951,19 @@ private void onKafkaDescribeBeginEx( configs.forEach(c -> out.printf(verboseFormat, index, offset, timestamp, c.asString())); } + private void onKafkaGroupBeginEx( + int offset, + 
long timestamp, + KafkaGroupBeginExFW group) + { + String16FW groupId = group.groupId(); + String16FW protocol = group.protocol(); + int timeout = group.timeout(); + + out.printf(verboseFormat, index, offset, timestamp, format("[group] %s %s %d", + groupId.asString(), protocol.asString(), timeout)); + } + private void onKafkaFetchBeginEx( int offset, long timestamp, @@ -1062,6 +1080,9 @@ private void onKafkaDataEx( case KafkaDataExFW.KIND_DESCRIBE: onKafkaDescribeDataEx(offset, timestamp, kafkaDataEx.describe()); break; + case KafkaDataExFW.KIND_GROUP: + onKafkaGroupDataEx(offset, timestamp, kafkaDataEx.group()); + break; case KafkaDataExFW.KIND_FETCH: onKafkaFetchDataEx(offset, timestamp, kafkaDataEx.fetch()); break; @@ -1089,6 +1110,17 @@ private void onKafkaDescribeDataEx( format("%s: %s", c.name().asString(), c.value().asString()))); } + private void onKafkaGroupDataEx( + int offset, + long timestamp, + KafkaGroupDataExFW group) + { + String16FW leader = group.leaderId(); + String16FW member = group.memberId(); + + out.printf(verboseFormat, index, offset, timestamp, format("[group] %s %s", leader.asString(), member.asString())); + } + private void onKafkaFetchDataEx( int offset, long timestamp, diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java index d9cf0a1d04..54dffe1320 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java @@ -20,9 +20,12 @@ import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; +import java.lang.reflect.Method; import java.math.BigInteger; import java.nio.file.Path; import java.security.SecureRandom; +import 
java.time.Duration; +import java.util.UUID; import java.util.function.Supplier; import org.agrona.LangUtil; @@ -63,6 +66,9 @@ public class KafkaConfiguration extends Configuration public static final IntPropertyDef KAFKA_CACHE_CLIENT_TRAILERS_SIZE_MAX; public static final IntPropertyDef KAFKA_CACHE_SERVER_RECONNECT_DELAY; public static final PropertyDef KAFKA_CLIENT_SASL_SCRAM_NONCE; + public static final PropertyDef KAFKA_CLIENT_GROUP_REBALANCE_TIMEOUT; + public static final PropertyDef KAFKA_CLIENT_ID; + public static final PropertyDef KAFKA_CLIENT_INSTANCE_ID; private static final ConfigurationDef KAFKA_CONFIG; @@ -100,6 +106,11 @@ public class KafkaConfiguration extends Configuration KAFKA_CACHE_CLIENT_TRAILERS_SIZE_MAX = config.property("cache.client.trailers.size.max", 256); KAFKA_CLIENT_SASL_SCRAM_NONCE = config.property(NonceSupplier.class, "client.sasl.scram.nonce", KafkaConfiguration::decodeNonceSupplier, KafkaConfiguration::defaultNonceSupplier); + KAFKA_CLIENT_GROUP_REBALANCE_TIMEOUT = config.property(Duration.class, "client.group.rebalance.timeout", + (c, v) -> Duration.parse(v), "PT4S"); + KAFKA_CLIENT_ID = config.property("client.id", "zilla"); + KAFKA_CLIENT_INSTANCE_ID = config.property(InstanceIdSupplier.class, "client.instance.id", + KafkaConfiguration::decodeInstanceId, KafkaConfiguration::defaultInstanceId); KAFKA_CONFIG = config; } @@ -248,6 +259,16 @@ public int cacheClientTrailersSizeMax() return KAFKA_CACHE_CLIENT_TRAILERS_SIZE_MAX.getAsInt(this); } + public String clientId() + { + return KAFKA_CLIENT_ID.get(this); + } + + public Duration clientGroupRebalanceTimeout() + { + return KAFKA_CLIENT_GROUP_REBALANCE_TIMEOUT.get(this); + } + private static Path cacheDirectory( Configuration config, String cacheDirectory) @@ -267,6 +288,11 @@ public Supplier nonceSupplier() return KAFKA_CLIENT_SASL_SCRAM_NONCE.get(this)::get; } + public Supplier clientInstanceIdSupplier() + { + return KAFKA_CLIENT_INSTANCE_ID.get(this)::get; + } + 
@FunctionalInterface private interface NonceSupplier { @@ -315,4 +341,55 @@ private static NonceSupplier defaultNonceSupplier( return () -> new BigInteger(130, new SecureRandom()).toString(Character.MAX_RADIX); } + + @FunctionalInterface + public interface InstanceIdSupplier extends Supplier + { + } + + private static InstanceIdSupplier decodeInstanceId( + Configuration config, + String value) + { + try + { + String className = value.substring(0, value.indexOf("$$Lambda")); + Class lambdaClass = Class.forName(className); + + Method targetMethod = null; + for (Method method : lambdaClass.getDeclaredMethods()) + { + if (method.isSynthetic()) + { + targetMethod = method; + break; + } + } + + Method finalTargetMethod = targetMethod; + return () -> + { + try + { + finalTargetMethod.setAccessible(true); + return (String) finalTargetMethod.invoke(null); + } + catch (Exception e) + { + throw new RuntimeException("Failed to invoke the lambda method.", e); + } + }; + } + catch (Throwable ex) + { + LangUtil.rethrowUnchecked(ex); + } + return null; + } + + private static InstanceIdSupplier defaultInstanceId( + Configuration config) + { + return () -> String.format("%s-%s", KAFKA_CLIENT_ID.get(config), UUID.randomUUID()); + } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaBindingConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaBindingConfig.java index df6aac76cc..ff9e7c6d0e 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaBindingConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaBindingConfig.java @@ -45,7 +45,18 @@ public KafkaRouteConfig resolve( String topic) { return routes.stream() - .filter(r -> r.authorized(authorization) && r.matches(topic)) + .filter(r -> r.authorized(authorization) && r.matches(topic, null)) + 
.findFirst() + .orElse(null); + } + + public KafkaRouteConfig resolve( + long authorization, + String topic, + String groupId) + { + return routes.stream() + .filter(r -> r.authorized(authorization) && r.matches(topic, groupId)) .findFirst() .orElse(null); } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfig.java index d217d16ea9..0d222e179d 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfig.java @@ -20,10 +20,13 @@ public final class KafkaConditionConfig extends ConditionConfig { public final String topic; + public final String groupId; public KafkaConditionConfig( - String topic) + String topic, + String groupId) { this.topic = topic; + this.groupId = groupId; } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapter.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapter.java index 351d45ce06..46e3cdd2e5 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapter.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapter.java @@ -27,6 +27,7 @@ public final class KafkaConditionConfigAdapter implements ConditionConfigAdapterSpi, JsonbAdapter { private static final String TOPIC_NAME = "topic"; + private static final String GROUP_ID_NAME = "groupId"; @Override public String type() @@ -47,6 +48,11 @@ public JsonObject adaptToJson( object.add(TOPIC_NAME, kafkaCondition.topic); } + if 
(kafkaCondition.groupId != null) + { + object.add(GROUP_ID_NAME, kafkaCondition.groupId); + } + return object.build(); } @@ -58,6 +64,10 @@ public ConditionConfig adaptFromJson( ? object.getString(TOPIC_NAME) : null; - return new KafkaConditionConfig(topic); + String groupId = object.containsKey(GROUP_ID_NAME) + ? object.getString(GROUP_ID_NAME) + : null; + + return new KafkaConditionConfig(topic, groupId); } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionMatcher.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionMatcher.java index 37bf8d0e85..329e70b898 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionMatcher.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionMatcher.java @@ -21,17 +21,20 @@ public final class KafkaConditionMatcher { private final Matcher topicMatch; + private final Matcher grouoIdMatch; public KafkaConditionMatcher( KafkaConditionConfig condition) { this.topicMatch = condition.topic != null ? asMatcher(condition.topic) : null; + this.grouoIdMatch = condition.groupId != null ? 
asMatcher(condition.groupId) : null; } public boolean matches( - String topic) + String topic, + String groupId) { - return matchesTopic(topic); + return matchesTopic(topic) && matchesGroupId(groupId); } private boolean matchesTopic( @@ -40,9 +43,18 @@ private boolean matchesTopic( return this.topicMatch == null || this.topicMatch.reset(topic).matches(); } + private boolean matchesGroupId( + String groupId) + { + return this.grouoIdMatch == null || this.grouoIdMatch.reset(groupId).matches(); + } + private static Matcher asMatcher( String wildcard) { - return Pattern.compile(wildcard.replace(".", "\\.").replace("*", ".*")).matcher(""); + return Pattern.compile(wildcard + .replace(".", "\\.") + .replace("*", ".*")) + .matcher(""); } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaRouteConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaRouteConfig.java index 8949acfc3c..83805da27a 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaRouteConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaRouteConfig.java @@ -50,8 +50,9 @@ boolean authorized( } boolean matches( - String topic) + String topic, + String groupId) { - return when.isEmpty() || when.stream().anyMatch(m -> m.matches(topic)); + return when.isEmpty() || when.stream().anyMatch(m -> m.matches(topic, groupId)); } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java index 8833175c5e..de57588c10 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java +++ 
b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java @@ -64,6 +64,8 @@ public KafkaCacheClientFactory( final KafkaCacheClientDescribeFactory cacheDescribeFactory = new KafkaCacheClientDescribeFactory( config, context, bindings::get, supplyCacheRoute); + final KafkaCacheGroupFactory cacheGroupFactory = new KafkaCacheGroupFactory(config, context, bindings::get); + final KafkaCacheClientFetchFactory cacheFetchFactory = new KafkaCacheClientFetchFactory( config, context, bindings::get, accountant::supplyDebitor, supplyCache, supplyCacheRoute); @@ -76,6 +78,7 @@ public KafkaCacheClientFactory( final Int2ObjectHashMap factories = new Int2ObjectHashMap<>(); factories.put(KafkaBeginExFW.KIND_META, cacheMetaFactory); factories.put(KafkaBeginExFW.KIND_DESCRIBE, cacheDescribeFactory); + factories.put(KafkaBeginExFW.KIND_GROUP, cacheGroupFactory); factories.put(KafkaBeginExFW.KIND_FETCH, cacheFetchFactory); factories.put(KafkaBeginExFW.KIND_PRODUCE, cacheProduceFactory); factories.put(KafkaBeginExFW.KIND_MERGED, cacheMergedFactory); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java new file mode 100644 index 0000000000..c27fe29960 --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java @@ -0,0 +1,1037 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.LongUnaryOperator; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.FlushFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; + +public final class KafkaCacheGroupFactory implements BindingHandler +{ + private static final Consumer EMPTY_EXTENSION = ex -> {}; + + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final FlushFW flushRO = new FlushFW(); + private final AbortFW abortRO = new AbortFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new WindowFW(); + private final ExtensionFW extensionRO = new ExtensionFW(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final FlushFW.Builder flushRW = new FlushFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); + + private final int kafkaTypeId; + private final MutableDirectBuffer writeBuffer; + private final BufferPool bufferPool; + private final BindingHandler streamFactory; + private final LongUnaryOperator supplyInitialId; + private final LongUnaryOperator supplyReplyId; + private final LongFunction supplyBinding; + + public KafkaCacheGroupFactory( + KafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding) + { + this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.writeBuffer = new 
UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.bufferPool = context.bufferPool(); + this.streamFactory = context.streamFactory(); + this.supplyInitialId = context::supplyInitialId; + this.supplyReplyId = context::supplyReplyId; + this.supplyBinding = supplyBinding; + } + + @Override + public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer sender) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert (initialId & 0x0000_0000_0000_0001L) != 0L; + + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extension.get(extensionRO::tryWrap); + assert beginEx != null && beginEx.typeId() == kafkaTypeId; + final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap); + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_GROUP; + final KafkaGroupBeginExFW kafkaGroupBeginEx = kafkaBeginEx.group(); + final String groupId = kafkaGroupBeginEx.groupId().asString(); + final String protocol = kafkaGroupBeginEx.protocol().asString(); + final int timeout = kafkaGroupBeginEx.timeout(); + + MessageConsumer newStream = null; + + final KafkaBindingConfig binding = supplyBinding.apply(routedId); + final KafkaRouteConfig resolved = binding != null ? 
binding.resolve(authorization, null, groupId) : null; + + if (resolved != null) + { + final long resolvedId = resolved.id; + + newStream = new KafkaCacheGroupApp( + sender, + originId, + routedId, + initialId, + affinity, + authorization, + resolvedId, + groupId, + protocol, + timeout)::onGroupMessage; + } + + return newStream; + } + + private MessageConsumer newStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + final MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + 
long budgetId, + int flags, + int reserved, + OctetsFW payload, + Flyweight extension) + { + final DataFW frame = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); + } + + + private void doDataNull( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doFlush( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Consumer extension) + { + final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension) + .build(); + + receiver.accept(flush.typeId(), flush.buffer(), 
flush.offset(), flush.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doWindow( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + { + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long 
sequence, + long acknowledge, + int maximum, + long traceId, + long authorization) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + final class KafkaCacheGroupNet + { + private final long originId; + private final long routedId; + private final long authorization; + private final KafkaCacheGroupApp delegate; + + private long initialId; + private long replyId; + private MessageConsumer receiver; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + private long initialBud; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaCacheGroupNet( + KafkaCacheGroupApp delegate, + long originId, + long routedId, + long authorization) + { + this.delegate = delegate; + this.originId = originId; + this.routedId = routedId; + this.receiver = MessageConsumer.NOOP; + this.authorization = authorization; + } + + private void doGroupInitialBegin( + long traceId) + { + if (KafkaState.closed(state)) + { + state = 0; + } + + if (!KafkaState.initialOpening(state)) + { + if (KafkaConfiguration.DEBUG) + { + System.out.format("%s GroupId connect\n", delegate.groupId); + } + + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.receiver = newStream(this::onGroupMessage, + originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, 0L, + ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .group(g -> g.groupId(delegate.groupId) + .protocol(delegate.protocol) + .timeout(delegate.timeout)) + .build() + .sizeof())); 
+ state = KafkaState.openingInitial(state); + } + } + + private void doGroupInitialData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + doData(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doGroupInitialFlush( + long traceId) + { + doFlush(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, initialBud, 0, EMPTY_EXTENSION); + } + + private void doGroupInitialEnd( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doEnd(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void doGroupInitialAbort( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doAbort(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void onGroupInitialReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + + delegate.initialAck = acknowledge; + state = KafkaState.closedInitial(state); + + assert delegate.initialAck <= delegate.initialSeq; + + delegate.doGroupInitialReset(traceId); + + doGroupReplyReset(traceId); + } + + + private void onGroupInitialWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = 
window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + assert maximum >= delegate.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + initialBud = budgetId; + state = KafkaState.openedInitial(state); + + assert initialAck <= initialSeq; + + delegate.doGroupInitialWindow(authorization, traceId, budgetId, padding); + } + + private void onGroupMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onGroupReplyBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onGroupReplyData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onGroupReplyEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onGroupReplyAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onGroupInitialReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onGroupInitialWindow(window); + break; + default: + break; + } + } + + private void onGroupReplyBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + state = KafkaState.openingReply(state); + + delegate.doGroupReplyBegin(traceId); + } + + private void onGroupReplyData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final int flags = data.flags(); + final int reserved = data.reserved(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= 
sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + delegate.doGroupReplyData(traceId, flags, reserved, payload, extension); + } + + private void onGroupReplyEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + delegate.doGroupReplyEnd(traceId); + } + + private void onGroupReplyAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + delegate.doGroupReplyAbort(traceId); + } + + private void doGroupReplyReset( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization); + + state = KafkaState.closedReply(state); + } + } + + private void doGroupReplyWindow( + long traceId, + long authorization, + long budgetId, + int padding) + { + replyAck = Math.max(delegate.replyAck - replyPad, 0); + replyMax = delegate.replyMax; + + doWindow(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding + replyPad); + } + } + + private final class KafkaCacheGroupApp + { + private final KafkaCacheGroupNet group; + private final MessageConsumer sender; + private final String groupId; + private final String protocol; + private final int timeout; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final long affinity; + 
private final long authorization; + + private int state; + + private long replyBudgetId; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + private long replyBud; + private int replyCap; + + KafkaCacheGroupApp( + MessageConsumer sender, + long originId, + long routedId, + long initialId, + long affinity, + long authorization, + long resolvedId, + String groupId, + String protocol, + int timeout) + { + this.group = new KafkaCacheGroupNet(this, routedId, resolvedId, authorization); + this.sender = sender; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + this.authorization = authorization; + this.groupId = groupId; + this.protocol = protocol; + this.timeout = timeout; + } + + private void onGroupMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onGroupInitialBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onGroupInitialData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onGroupInitialEnd(end); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onGroupInitialFlush(flush); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onGroupInitialAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onGroupReplyWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onGroupReplyReset(reset); + break; + default: + break; + } + } + 
+ private void onGroupInitialBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + initialAck = acknowledge; + state = KafkaState.openingInitial(state); + + assert initialAck <= initialSeq; + + group.doGroupInitialBegin(traceId); + } + + private void onGroupInitialData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + final int flags = data.flags(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + + assert initialAck <= initialSeq; + + group.doGroupInitialData(traceId, authorization, budgetId, reserved, flags, payload, extension); + } + + private void onGroupInitialEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + group.doGroupInitialEnd(traceId); + } + + private void onGroupInitialFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= 
initialSeq; + + group.doGroupInitialFlush(traceId); + } + + private void onGroupInitialAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + group.doGroupInitialAbort(traceId); + } + + private void doGroupInitialReset( + long traceId) + { + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + { + state = KafkaState.closedInitial(state); + + doReset(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization); + } + + state = KafkaState.closedInitial(state); + } + + private void doGroupInitialWindow( + long authorization, + long traceId, + long budgetId, + int padding) + { + initialAck = group.initialAck; + initialMax = group.initialMax; + + doWindow(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, padding); + } + + private void doGroupReplyBegin( + long traceId) + { + state = KafkaState.openingReply(state); + + doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, EMPTY_EXTENSION); + } + + private void doGroupReplyData( + long traceId, + int flag, + int reserved, + OctetsFW payload, + Flyweight extension) + { + + doData(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, flag, reserved, payload, extension); + + replySeq += reserved; + } + + private void doGroupReplyEnd( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doEnd(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void 
doGroupReplyAbort( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doAbort(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void onGroupReplyReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final int maximum = reset.maximum(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + cleanup(traceId); + } + + private void onGroupReplyWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + replyBud = budgetId; + replyPad = padding; + replyCap = capabilities; + state = KafkaState.openedReply(state); + + assert replyAck <= replySeq; + + group.doGroupReplyWindow(traceId, acknowledge, budgetId, padding); + } + + private void cleanup( + long traceId) + { + doGroupInitialReset(traceId); + doGroupReplyAbort(traceId); + + group.doGroupInitialAbort(traceId); + group.doGroupReplyReset(traceId); + } + } +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java 
b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java index f9950aabb7..1d6ea15226 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java @@ -67,6 +67,8 @@ public KafkaCacheServerFactory( final KafkaCacheServerDescribeFactory cacheDescribeFactory = new KafkaCacheServerDescribeFactory( config, context, bindings::get, supplyCache, supplyCacheRoute); + final KafkaCacheGroupFactory cacheGroupFactory = new KafkaCacheGroupFactory(config, context, bindings::get); + final KafkaCacheServerFetchFactory cacheFetchFactory = new KafkaCacheServerFetchFactory( config, context, bindings::get, supplyCache, supplyCacheRoute); @@ -76,6 +78,7 @@ public KafkaCacheServerFactory( factories.put(KafkaBeginExFW.KIND_BOOTSTRAP, cacheBootstrapFactory); factories.put(KafkaBeginExFW.KIND_META, cacheMetaFactory); factories.put(KafkaBeginExFW.KIND_DESCRIBE, cacheDescribeFactory); + factories.put(KafkaBeginExFW.KIND_GROUP, cacheGroupFactory); factories.put(KafkaBeginExFW.KIND_FETCH, cacheFetchFactory); factories.put(KafkaBeginExFW.KIND_PRODUCE, cacheProduceFactory); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java index 442e015c50..10adeb7c4a 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java @@ -58,6 +58,9 @@ public KafkaClientFactory( final KafkaClientDescribeFactory clientDescribeFactory = new KafkaClientDescribeFactory( config, 
context, bindings::get, accountant::supplyDebitor); + final KafkaClientGroupFactory clientGroupFactory = new KafkaClientGroupFactory( + config, context, bindings::get, accountant::supplyDebitor); + final KafkaClientFetchFactory clientFetchFactory = new KafkaClientFetchFactory( config, context, bindings::get, accountant::supplyDebitor, supplyClientRoute); @@ -70,6 +73,7 @@ public KafkaClientFactory( final Int2ObjectHashMap factories = new Int2ObjectHashMap<>(); factories.put(KafkaBeginExFW.KIND_META, clientMetaFactory); factories.put(KafkaBeginExFW.KIND_DESCRIBE, clientDescribeFactory); + factories.put(KafkaBeginExFW.KIND_GROUP, clientGroupFactory); factories.put(KafkaBeginExFW.KIND_FETCH, clientFetchFactory); factories.put(KafkaBeginExFW.KIND_PRODUCE, clientProduceFactory); factories.put(KafkaBeginExFW.KIND_MERGED, clientMergedFactory); @@ -85,6 +89,9 @@ public void attach( { KafkaBindingConfig kafkaBinding = new KafkaBindingConfig(binding); bindings.put(binding.id, kafkaBinding); + + KafkaClientGroupFactory clientGroupFactory = (KafkaClientGroupFactory) factories.get(KafkaBeginExFW.KIND_GROUP); + clientGroupFactory.onAttached(binding.id); } @Override @@ -92,6 +99,8 @@ public void detach( long bindingId) { bindings.remove(bindingId); + KafkaClientGroupFactory clientGroupFactory = (KafkaClientGroupFactory) factories.get(KafkaBeginExFW.KIND_GROUP); + clientGroupFactory.onDetached(bindingId); } @Override diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java new file mode 100644 index 0000000000..d044e0c257 --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -0,0 +1,3010 @@ +/* + * Copyright 2021-2023 Aklivity Inc. 
+ * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.ProxyAddressProtocol.STREAM; +import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT; +import static io.aklivity.zilla.runtime.engine.concurrent.Signaler.NO_CANCEL_ID; +import static java.lang.System.currentTimeMillis; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.Supplier; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.collections.Long2ObjectHashMap; +import org.agrona.collections.LongLongConsumer; +import org.agrona.collections.Object2ObjectHashMap; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaSaslConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.String16FW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.RequestHeaderFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.ResponseHeaderFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.ResourceRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.ResourceResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.AssignmentFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.FindCoordinatorRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.FindCoordinatorResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.HeartbeatRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.HeartbeatResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.JoinGroupRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.JoinGroupResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.LeaveGroupRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.LeaveGroupResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.LeaveMemberFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.MemberMetadataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.ProtocolMetadataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.SyncGroupRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.SyncGroupResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; 
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.FlushFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ProxyBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.SignalFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.budget.BudgetDebitor; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; +import io.aklivity.zilla.runtime.engine.concurrent.Signaler; + +public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker implements BindingHandler +{ + private static final short ERROR_EXISTS = -1; + private static final short ERROR_NONE = 0; + private static final short ERROR_COORDINATOR_NOT_AVAILABLE = 15; + private static final short ERROR_NOT_COORDINATOR_FOR_CONSUMER = 16; + private static final short ERROR_UNKNOWN_MEMBER = 25; + private static final short ERROR_MEMBER_ID_REQUIRED = 79; + private static final short ERROR_REBALANCE_IN_PROGRESS = 27; + private static final short SIGNAL_NEXT_REQUEST = 1; + private static final short FIND_COORDINATOR_API_KEY = 
10; + private static final short FIND_COORDINATOR_API_VERSION = 1; + private static final short JOIN_GROUP_API_KEY = 11; + private static final short JOIN_GROUP_VERSION = 5; + private static final short SYNC_GROUP_API_KEY = 14; + private static final short SYNC_GROUP_VERSION = 3; + private static final short LEAVE_GROUP_API_KEY = 13; + private static final short LEAVE_GROUP_VERSION = 3; + private static final short HEARTBEAT_API_KEY = 12; + private static final short HEARTBEAT_VERSION = 3; + + private static final String UNKNOWN_MEMBER_ID = ""; + private static final String HIGHLANDER_PROTOCOL = "highlander"; + private static final byte GROUP_KEY_TYPE = 0x00; + private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer(); + private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0); + private static final Consumer EMPTY_EXTENSION = ex -> {}; + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final FlushFW flushRO = new FlushFW(); + private final AbortFW abortRO = new AbortFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new WindowFW(); + private final SignalFW signalRO = new SignalFW(); + private final ExtensionFW extensionRO = new ExtensionFW(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); + private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); + private final 
KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder(); + private final ProxyBeginExFW.Builder proxyBeginExRW = new ProxyBeginExFW.Builder(); + + private final RequestHeaderFW.Builder requestHeaderRW = new RequestHeaderFW.Builder(); + private final FindCoordinatorRequestFW.Builder findCoordinatorRequestRW = new FindCoordinatorRequestFW.Builder(); + private final JoinGroupRequestFW.Builder joinGroupRequestRW = new JoinGroupRequestFW.Builder(); + private final ProtocolMetadataFW.Builder protocolMetadataRW = new ProtocolMetadataFW.Builder(); + private final SyncGroupRequestFW.Builder syncGroupRequestRW = new SyncGroupRequestFW.Builder(); + private final AssignmentFW.Builder assignmentRW = new AssignmentFW.Builder(); + private final HeartbeatRequestFW.Builder heartbeatRequestRW = new HeartbeatRequestFW.Builder(); + private final LeaveGroupRequestFW.Builder leaveGroupRequestRW = new LeaveGroupRequestFW.Builder(); + private final LeaveMemberFW.Builder leaveMemberRW = new LeaveMemberFW.Builder(); + private final ResourceRequestFW.Builder resourceRequestRW = new ResourceRequestFW.Builder(); + + private final ResponseHeaderFW responseHeaderRO = new ResponseHeaderFW(); + private final FindCoordinatorResponseFW findCoordinatorResponseRO = new FindCoordinatorResponseFW(); + private final JoinGroupResponseFW joinGroupResponseRO = new JoinGroupResponseFW(); + private final MemberMetadataFW memberMetadataRO = new MemberMetadataFW(); + private final SyncGroupResponseFW syncGroupResponseRO = new SyncGroupResponseFW(); + private final HeartbeatResponseFW heartbeatResponseRO = new HeartbeatResponseFW(); + private final LeaveGroupResponseFW leaveGroupResponseRO = new LeaveGroupResponseFW(); + private final LeaveMemberFW leaveMemberRO = new LeaveMemberFW(); + private final ResourceResponseFW resourceResponseRO = new ResourceResponseFW(); + + private final KafkaGroupClusterClientDecoder decodeClusterSaslHandshakeResponse = this::decodeSaslHandshakeResponse; + private final 
KafkaGroupClusterClientDecoder decodeClusterSaslHandshake = this::decodeSaslHandshake; + private final KafkaGroupClusterClientDecoder decodeClusterSaslHandshakeMechanisms = this::decodeSaslHandshakeMechanisms; + private final KafkaGroupClusterClientDecoder decodeClusterSaslHandshakeMechanism = this::decodeSaslHandshakeMechanism; + private final KafkaGroupClusterClientDecoder decodeClusterSaslAuthenticateResponse = this::decodeSaslAuthenticateResponse; + private final KafkaGroupClusterClientDecoder decodeClusterSaslAuthenticate = this::decodeSaslAuthenticate; + private final KafkaGroupClusterClientDecoder decodeFindCoordinatorResponse = this::decodeFindCoordinatorResponse; + private final KafkaGroupClusterClientDecoder decodeClusterReject = this::decodeClusterReject; + private final KafkaGroupClusterClientDecoder decodeClusterIgnoreAll = this::decodeIgnoreAll; + private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorSaslHandshakeResponse = + this::decodeSaslHandshakeResponse; + private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorSaslHandshake = + this::decodeSaslHandshake; + private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorSaslHandshakeMechanisms = + this::decodeSaslHandshakeMechanisms; + private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorSaslHandshakeMechanism = + this::decodeSaslHandshakeMechanism; + private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorSaslAuthenticateResponse = + this::decodeSaslAuthenticateResponse; + private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorSaslAuthenticate = + this::decodeSaslAuthenticate; + private final KafkaGroupCoordinatorClientDecoder decodeJoinGroupResponse = + this::decodeJoinGroupResponse; + private final KafkaGroupCoordinatorClientDecoder decodeSyncGroupResponse = + this::decodeSyncGroupResponse; + private final KafkaGroupCoordinatorClientDecoder decodeHeartbeatResponse = + this::decodeHeartbeatResponse; + private final 
KafkaGroupCoordinatorClientDecoder decodeLeaveGroupResponse = + this::decodeLeaveGroupResponse; + private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorIgnoreAll = this::decodeIgnoreAll; + private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorReject = this::decodeCoordinatorReject; + + private final int kafkaTypeId; + private final int proxyTypeId; + private final MutableDirectBuffer writeBuffer; + private final MutableDirectBuffer extBuffer; + private final BufferPool decodePool; + private final BufferPool encodePool; + private final Signaler signaler; + private final BindingHandler streamFactory; + private final LongFunction supplyBinding; + private final Supplier supplyInstanceId; + private final Long2ObjectHashMap instanceIds; + private final Object2ObjectHashMap groupStreams; + private final String clientId; + private final Duration rebalanceTimeout; + + + public KafkaClientGroupFactory( + KafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding, + LongFunction supplyDebitor) + { + super(config, context); + this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.proxyTypeId = context.supplyTypeId("proxy"); + this.signaler = context.signaler(); + this.streamFactory = context.streamFactory(); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.decodePool = context.bufferPool(); + this.encodePool = context.bufferPool(); + this.supplyBinding = supplyBinding; + this.rebalanceTimeout = config.clientGroupRebalanceTimeout(); + this.clientId = config.clientId(); + this.supplyInstanceId = config.clientInstanceIdSupplier(); + this.instanceIds = new Long2ObjectHashMap<>(); + this.groupStreams = new Object2ObjectHashMap<>(); + } + + @Override + public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer application) + { + final BeginFW 
begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long affinity = begin.affinity(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()); + final KafkaBeginExFW kafkaBeginEx = beginEx != null && beginEx.typeId() == kafkaTypeId ? + kafkaBeginExRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()) : null; + + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_GROUP; + final KafkaGroupBeginExFW kafkaGroupBeginEx = kafkaBeginEx.group(); + + MessageConsumer newStream = null; + + final KafkaBindingConfig binding = supplyBinding.apply(routedId); + final KafkaRouteConfig resolved; + final int timeout = kafkaGroupBeginEx.timeout(); + final String groupId = kafkaGroupBeginEx.groupId().asString(); + final String protocol = kafkaGroupBeginEx.protocol().asString(); + + if (binding != null) + { + resolved = binding.resolve(authorization, null, groupId); + + if (resolved != null) + { + final long resolvedId = resolved.id; + final KafkaSaslConfig sasl = binding.sasl(); + + final GroupMembership groupMembership = instanceIds.get(binding.id); + assert groupMembership != null; + + KafkaGroupStream stream = groupStreams.get(groupId); + if (stream == null || HIGHLANDER_PROTOCOL.equals(protocol)) + { + if (stream != null) + { + stream.streamCleanup(traceId, traceId); + } + + KafkaGroupStream group = new KafkaGroupStream( + application, + originId, + routedId, + initialId, + affinity, + resolvedId, + groupId, + protocol, + timeout, + groupMembership, + sasl); + newStream = group::onApplication; + + groupStreams.put(groupId, group); + } + } + } + + return newStream; + } + + public void onAttached( + long bindingId) + { + 
instanceIds.put(bindingId, new GroupMembership(supplyInstanceId.get())); + } + + public void onDetached( + long bindingId) + { + instanceIds.remove(bindingId); + } + + private MessageConsumer newStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + final MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer payload, + int offset, + int length, + Consumer extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, 
writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload, offset, length) + .extension(extension) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doDataNull( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Consumer extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + 
.originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doWindow( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + { + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Flyweight extension) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + @FunctionalInterface + private interface KafkaGroupClusterClientDecoder + { + int decode( + ClusterClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int progress, + int limit); + } + + @FunctionalInterface + private interface KafkaGroupCoordinatorClientDecoder + { + int decode( + CoordinatorClient 
client, + long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int progress, + int limit); + } + + private int decodeFindCoordinatorResponse( + ClusterClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final ResponseHeaderFW responseHeader = responseHeaderRO.tryWrap(buffer, progress, limit); + if (responseHeader == null) + { + client.decoder = decodeClusterIgnoreAll; + break decode; + } + + final int responseSize = responseHeader.length(); + + if (length >= responseHeader.sizeof() + responseSize) + { + progress = responseHeader.limit(); + + final FindCoordinatorResponseFW findCoordinatorResponse = + findCoordinatorResponseRO.tryWrap(buffer, progress, limit); + + if (findCoordinatorResponse == null) + { + client.decoder = decodeClusterIgnoreAll; + break decode; + } + else if (findCoordinatorResponse.errorCode() == ERROR_COORDINATOR_NOT_AVAILABLE) + { + client.onCoordinatorNotAvailable(traceId, authorization); + } + else if (findCoordinatorResponse.errorCode() == ERROR_NONE) + { + client.onFindCoordinator(traceId, authorization, + findCoordinatorResponse.host(), findCoordinatorResponse.port()); + } + else + { + client.decoder = decodeClusterIgnoreAll; + } + + progress = findCoordinatorResponse.limit(); + } + } + + if (client.decoder == decodeClusterIgnoreAll) + { + client.onError(traceId); + } + + return progress; + } + + + private int decodeClusterReject( + ClusterClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + client.doNetworkReset(traceId); + client.decoder = decodeClusterIgnoreAll; + return limit; + } + + private int decodeCoordinatorReject( + CoordinatorClient client, + long traceId, + long authorization, + long 
budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + client.doNetworkReset(traceId); + client.decoder = decodeCoordinatorIgnoreAll; + return limit; + } + + private int decodeIgnoreAll( + KafkaSaslClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + return limit; + } + + private int decodeJoinGroupResponse( + CoordinatorClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final ResponseHeaderFW responseHeader = responseHeaderRO.tryWrap(buffer, progress, limit); + if (responseHeader == null) + { + client.decoder = decodeJoinGroupResponse; + progress = limit; + break decode; + } + + final int responseSize = responseHeader.length(); + + if (length >= responseHeader.sizeof() + responseSize) + { + progress = responseHeader.limit(); + + final JoinGroupResponseFW joinGroupResponse = + joinGroupResponseRO.tryWrap(buffer, progress, limit); + + final short errorCode = joinGroupResponse != null ? 
joinGroupResponse.errorCode() : ERROR_EXISTS; + + if (joinGroupResponse == null) + { + client.decoder = decodeJoinGroupResponse; + progress = limit; + break decode; + } + else if (errorCode == ERROR_NOT_COORDINATOR_FOR_CONSUMER) + { + client.onNotCoordinatorError(traceId, authorization); + progress = joinGroupResponse.limit(); + } + else if (errorCode == ERROR_UNKNOWN_MEMBER) + { + client.onJoinGroupMemberIdError(traceId, authorization, UNKNOWN_MEMBER_ID); + progress = joinGroupResponse.limit(); + } + else if (errorCode == ERROR_MEMBER_ID_REQUIRED) + { + client.onJoinGroupMemberIdError(traceId, authorization, + joinGroupResponse.memberId().asString()); + progress = joinGroupResponse.limit(); + } + else if (errorCode == ERROR_NONE) + { + progress = joinGroupResponse.limit(); + client.members.clear(); + + client.generationId = joinGroupResponse.generatedId(); + + metadata: + for (int i = 0; i < joinGroupResponse.memberCount(); i++) + { + final MemberMetadataFW memberMetadata = memberMetadataRO.tryWrap(buffer, progress, limit); + if (memberMetadata != null) + { + client.members.add(memberMetadata.memberId().asString()); + progress = memberMetadata.limit(); + } + else + { + break metadata; + } + } + + client.onJoinGroupResponse(traceId, authorization, joinGroupResponse.leader().asString(), + joinGroupResponse.memberId().asString(), errorCode); + } + else + { + client.decoder = decodeCoordinatorIgnoreAll; + break decode; + } + + } + } + + if (client.decoder == decodeCoordinatorIgnoreAll) + { + client.onError(traceId); + } + + return progress; + } + + private int decodeSyncGroupResponse( + CoordinatorClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final ResponseHeaderFW responseHeader = responseHeaderRO.tryWrap(buffer, progress, limit); + if (responseHeader == null) + { + client.decoder = 
decodeCoordinatorIgnoreAll; + break decode; + } + + final int responseSize = responseHeader.length(); + + if (length >= responseHeader.sizeof() + responseSize) + { + progress = responseHeader.limit(); + + final SyncGroupResponseFW syncGroupResponse = + syncGroupResponseRO.tryWrap(buffer, progress, limit); + + final short errorCode = syncGroupResponse != null ? syncGroupResponse.errorCode() : ERROR_EXISTS; + + if (syncGroupResponse == null) + { + client.decoder = decodeCoordinatorIgnoreAll; + break decode; + } + else if (errorCode == ERROR_REBALANCE_IN_PROGRESS) + { + client.onSynGroupRebalance(traceId, authorization); + } + else if (errorCode == ERROR_NONE) + { + client.onSyncGroupResponse(traceId, authorization, syncGroupResponse.assignment()); + } + else + { + client.decoder = decodeCoordinatorIgnoreAll; + break decode; + } + + progress = syncGroupResponse.limit(); + } + } + + if (client.decoder == decodeCoordinatorIgnoreAll) + { + client.onError(traceId); + } + + return progress; + } + + private int decodeHeartbeatResponse( + CoordinatorClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final ResponseHeaderFW responseHeader = responseHeaderRO.tryWrap(buffer, progress, limit); + if (responseHeader == null) + { + client.decoder = decodeCoordinatorIgnoreAll; + break decode; + } + + final int responseSize = responseHeader.length(); + + if (length >= responseHeader.sizeof() + responseSize) + { + progress = responseHeader.limit(); + + final HeartbeatResponseFW heartbeatResponse = + heartbeatResponseRO.tryWrap(buffer, progress, limit); + + if (heartbeatResponse == null) + { + client.decoder = decodeCoordinatorIgnoreAll; + break decode; + } + else if (heartbeatResponse.errorCode() == ERROR_REBALANCE_IN_PROGRESS) + { + client.onRebalanceError(traceId, authorization); + } + else if 
(heartbeatResponse.errorCode() == ERROR_NONE) + { + client.onHeartbeatResponse(traceId, authorization); + } + else + { + client.decoder = decodeCoordinatorIgnoreAll; + break decode; + } + + progress = heartbeatResponse.limit(); + } + } + + if (client.decoder == decodeCoordinatorIgnoreAll) + { + client.onError(traceId); + } + + return progress; + } + + private int decodeLeaveGroupResponse( + CoordinatorClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final ResponseHeaderFW responseHeader = responseHeaderRO.tryWrap(buffer, progress, limit); + if (responseHeader == null) + { + client.decoder = decodeCoordinatorIgnoreAll; + break decode; + } + + final int responseSize = responseHeader.length(); + + if (length >= responseHeader.sizeof() + responseSize) + { + progress = responseHeader.limit(); + + final LeaveGroupResponseFW leaveGroupResponse = + leaveGroupResponseRO.tryWrap(buffer, progress, limit); + + if (leaveGroupResponse == null) + { + client.decoder = decodeCoordinatorIgnoreAll; + break decode; + } + else + { + progress = leaveGroupResponse.limit(); + + members: + for (int i = 0; i < leaveGroupResponse.memberCount(); i++) + { + final LeaveMemberFW member = leaveMemberRO.tryWrap(buffer, progress, limit); + if (member != null) + { + progress = member.limit(); + } + else + { + break members; + } + } + + client.onLeaveGroupResponse(traceId, authorization); + } + } + } + + if (client.decoder == decodeCoordinatorIgnoreAll) + { + client.onError(traceId); + } + + return progress; + } + + private final class KafkaGroupStream + { + private final MessageConsumer application; + private final ClusterClient clusterClient; + private final CoordinatorClient coordinatorClient; + private final GroupMembership groupMembership; + private final String groupId; + private final String protocol; + private 
final int timeout; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final long affinity; + private final long resolvedId; + private final KafkaSaslConfig sasl; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private long replyBudgetId; + + KafkaGroupStream( + MessageConsumer application, + long originId, + long routedId, + long initialId, + long affinity, + long resolvedId, + String groupId, + String protocol, + int timeout, + GroupMembership groupMembership, + KafkaSaslConfig sasl) + { + this.application = application; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + this.groupId = groupId; + this.protocol = protocol; + this.timeout = timeout; + this.resolvedId = resolvedId; + this.groupMembership = groupMembership; + this.sasl = sasl; + this.clusterClient = new ClusterClient(routedId, resolvedId, sasl, this); + this.coordinatorClient = new CoordinatorClient(routedId, resolvedId, sasl, this); + } + + private void onApplication( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onApplicationBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onApplicationData(data); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onApplicationFlush(flush); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onApplicationEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + 
length); + onApplicationAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onApplicationWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onApplicationReset(reset); + break; + default: + break; + } + } + + private void onApplicationBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + + state = KafkaState.openingInitial(state); + + clusterClient.doNetworkBeginIfNecessary(traceId, authorization, affinity); + doApplicationWindow(traceId, 0L, 0, 0, 0); + } + + private void onApplicationData( + DataFW data) + { + final long traceId = data.traceId(); + final long budgetId = data.budgetId(); + + coordinatorClient.doSyncRequest(traceId, budgetId, data.payload()); + } + + private void onApplicationEnd( + EndFW end) + { + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + state = KafkaState.closingInitial(state); + coordinatorClient.doLeaveGroupRequest(traceId); + } + + private void onApplicationFlush( + FlushFW flush) + { + final long traceId = flush.traceId(); + + coordinatorClient.doHeartbeat(traceId); + } + + private void onApplicationAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); + + state = KafkaState.closedInitial(state); + + clusterClient.doNetworkAbort(traceId); + coordinatorClient.doNetworkAbort(traceId); + + cleanupApplication(traceId, EMPTY_OCTETS); + } + + private void onApplicationWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= 
replyMax; + + this.replyAck = acknowledge; + this.replyMax = maximum; + this.replyPad = padding; + this.replyBudgetId = budgetId; + + assert replyAck <= replySeq; + } + + private void onApplicationReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + clusterClient.doNetworkReset(traceId); + } + + private boolean isApplicationReplyOpen() + { + return KafkaState.replyOpening(state); + } + + private void doApplicationBeginIfNecessary( + long traceId, + long authorization) + { + if (!KafkaState.replyOpening(state)) + { + doApplicationBegin(traceId, authorization); + } + } + + private void doApplicationBegin( + long traceId, + long authorization) + { + state = KafkaState.openingReply(state); + + doBegin(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, EMPTY_EXTENSION); + } + + private void doApplicationData( + long traceId, + long authorization, + OctetsFW payload, + Consumer extension) + { + final int reserved = replyPad; + + if (payload.sizeof() > 0) + { + doData(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, reserved, + payload.value(), payload.offset(), payload.sizeof(), extension); + } + else + { + doDataNull(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, reserved, extension); + } + + replySeq += reserved; + + assert replyAck <= replySeq; + } + + private void doApplicationEnd( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + state = KafkaState.closedReply(state); + doEnd(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, 0, EMPTY_EXTENSION); + } + } + + private void doApplicationAbort( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + state = KafkaState.closedReply(state); + doAbort(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + 
traceId, 0, EMPTY_EXTENSION); + } + } + + private void doApplicationWindow( + long traceId, + long budgetId, + int minInitialNoAck, + int minInitialPad, + int minInitialMax) + { + final long newInitialAck = Math.max(initialSeq - minInitialNoAck, initialAck); + + if (newInitialAck > initialAck || minInitialMax > initialMax || !KafkaState.initialOpened(state)) + { + initialAck = newInitialAck; + assert initialAck <= initialSeq; + + initialMax = minInitialMax; + + state = KafkaState.openedInitial(state); + + doWindow(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, clusterClient.authorization, budgetId, minInitialPad); + } + } + + private void doApplicationReset( + long traceId, + Flyweight extension) + { + state = KafkaState.closedInitial(state); + + doReset(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, clusterClient.authorization, extension); + } + + private void doApplicationAbortIfNecessary( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doApplicationAbort(traceId); + } + } + + private void doApplicationResetIfNecessary( + long traceId, + Flyweight extension) + { + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + { + doApplicationReset(traceId, extension); + } + } + + private void onNotCoordinatorError( + long traceId, + long authority) + { + clusterClient.doNetworkBeginIfNecessary(traceId, authority, affinity); + } + + private void cleanupApplication( + long traceId, + int error) + { + final KafkaResetExFW kafkaResetEx = kafkaResetExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .error(error) + .build(); + + cleanupApplication(traceId, kafkaResetEx); + } + + private void cleanupApplication( + long traceId, + Flyweight extension) + { + doApplicationResetIfNecessary(traceId, extension); + doApplicationAbortIfNecessary(traceId); + + groupStreams.remove(groupId); + } + + private void 
streamCleanup( + long traceId, + long authorizationId) + { + cleanupApplication(traceId, EMPTY_OCTETS); + clusterClient.cleanupNetwork(traceId, authorizationId); + coordinatorClient.cleanupNetwork(traceId, authorizationId); + } + } + + private final class ClusterClient extends KafkaSaslClient + { + private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest; + private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest; + private final LongLongConsumer encodeFindCoordinatorRequest = this::doEncodeFindCoordinatorRequest; + private final KafkaGroupStream delegate; + + private MessageConsumer network; + + private int state; + private long authorization; + + private long initialSeq; + private long initialAck; + private int initialMax; + private int initialPad; + private long initialBudgetId; + + private long replySeq; + private long replyAck; + private int replyMax; + + private int encodeSlot = NO_SLOT; + private int encodeSlotOffset; + private long encodeSlotTraceId; + + private int decodeSlot = NO_SLOT; + private int decodeSlotOffset; + private int decodeSlotReserved; + + private int nextResponseId; + + private KafkaGroupClusterClientDecoder decoder; + private LongLongConsumer encoder; + + ClusterClient( + long originId, + long routedId, + KafkaSaslConfig sasl, + KafkaGroupStream delegate) + { + super(sasl, originId, routedId); + + this.encoder = sasl != null ? 
encodeSaslHandshakeRequest : encodeFindCoordinatorRequest; + this.delegate = delegate; + this.decoder = decodeClusterReject; + } + + private void onNetwork( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onNetworkBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onNetworkData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onNetworkEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onNetworkAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onNetworkReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onNetworkWindow(window); + break; + case SignalFW.TYPE_ID: + final SignalFW signal = signalRO.wrap(buffer, index, index + length); + onNetworkSignal(signal); + break; + default: + break; + } + } + + private void onNetworkBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + authorization = begin.authorization(); + state = KafkaState.openingReply(state); + + doNetworkWindow(traceId, 0L, 0, 0, decodePool.slotCapacity()); + } + + private void onNetworkData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long budgetId = data.budgetId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + data.reserved(); + authorization = data.authorization(); + + assert replyAck <= replySeq; + + if (replySeq > replyAck + replyMax) + { + onError(traceId); + } + else + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = decodePool.acquire(initialId); + } + + if (decodeSlot == 
NO_SLOT) + { + onError(traceId); + } + else + { + final OctetsFW payload = data.payload(); + int reserved = data.reserved(); + int offset = payload.offset(); + int limit = payload.limit(); + + final MutableDirectBuffer buffer = decodePool.buffer(decodeSlot); + buffer.putBytes(decodeSlotOffset, payload.buffer(), offset, limit - offset); + decodeSlotOffset += limit - offset; + decodeSlotReserved += reserved; + + offset = 0; + limit = decodeSlotOffset; + reserved = decodeSlotReserved; + + decodeNetwork(traceId, authorization, budgetId, reserved, buffer, offset, limit); + } + } + } + + private void onNetworkEnd( + EndFW end) + { + final long traceId = end.traceId(); + + state = KafkaState.closedReply(state); + + cleanupDecodeSlotIfNecessary(); + + if (!delegate.isApplicationReplyOpen()) + { + onError(traceId); + } + else if (decodeSlot == NO_SLOT) + { + delegate.doApplicationEnd(traceId); + } + } + + private void onNetworkAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedReply(state); + + onError(traceId); + } + + private void onNetworkReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + onError(traceId); + } + + private void onNetworkWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= initialSeq; + assert acknowledge >= initialAck; + assert maximum + acknowledge >= initialMax + initialAck; + + this.initialAck = acknowledge; + this.initialMax = maximum; + this.initialPad = padding; + this.initialBudgetId = budgetId; + + assert initialAck <= initialSeq; + + this.authorization = window.authorization(); + + state = KafkaState.openedInitial(state); + + if (encodeSlot != 
NO_SLOT) + { + final MutableDirectBuffer buffer = encodePool.buffer(encodeSlot); + final int limit = encodeSlotOffset; + + encodeNetwork(encodeSlotTraceId, authorization, budgetId, buffer, 0, limit); + } + + doEncodeRequestIfNecessary(traceId, budgetId); + } + + private void onNetworkSignal( + SignalFW signal) + { + final long traceId = signal.traceId(); + final int signalId = signal.signalId(); + + if (signalId == SIGNAL_NEXT_REQUEST) + { + doEncodeRequestIfNecessary(traceId, initialBudgetId); + } + } + + private void doNetworkBeginIfNecessary( + long traceId, + long authorization, + long affinity) + { + if (KafkaState.closed(state)) + { + replyAck = 0; + replySeq = 0; + state = 0; + } + + if (!KafkaState.initialOpening(state)) + { + doNetworkBegin(traceId, authorization, affinity); + } + } + + private void doNetworkBegin( + long traceId, + long authorization, + long affinity) + { + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + + state = KafkaState.openingInitial(state); + + network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, EMPTY_EXTENSION); + } + + @Override + protected void doNetworkData( + long traceId, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + if (encodeSlot != NO_SLOT) + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(encodeSlotOffset, buffer, offset, limit - offset); + encodeSlotOffset += limit - offset; + encodeSlotTraceId = traceId; + + buffer = encodeBuffer; + offset = 0; + limit = encodeSlotOffset; + } + + encodeNetwork(traceId, authorization, budgetId, buffer, offset, limit); + } + + private void doNetworkEnd( + long traceId, + long authorization) + { + if (!KafkaState.initialClosed(state)) + { + state = KafkaState.closedInitial(state); + + doEnd(network, originId, routedId, initialId, 
initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + } + + cleanupEncodeSlotIfNecessary(); + } + + private void doNetworkAbort( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doAbort(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + state = KafkaState.closedInitial(state); + } + + cleanupEncodeSlotIfNecessary(); + } + + private void doNetworkReset( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_OCTETS); + state = KafkaState.closedReply(state); + } + + cleanupDecodeSlotIfNecessary(); + } + + private void doNetworkWindow( + long traceId, + long budgetId, + int minReplyNoAck, + int minReplyPad, + int minReplyMax) + { + final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck); + + if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state)) + { + replyAck = newReplyAck; + assert replyAck <= replySeq; + + replyMax = minReplyMax; + + state = KafkaState.openedReply(state); + + doWindow(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, minReplyPad); + } + } + + private void doEncodeRequestIfNecessary( + long traceId, + long budgetId) + { + if (nextRequestId == nextResponseId) + { + encoder.accept(traceId, budgetId); + } + } + + private void doEncodeFindCoordinatorRequest( + long traceId, + long budgetId) + { + final MutableDirectBuffer encodeBuffer = writeBuffer; + final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; + final int encodeLimit = encodeBuffer.capacity(); + + int encodeProgress = encodeOffset; + + final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .length(0) + .apiKey(FIND_COORDINATOR_API_KEY) + .apiVersion(FIND_COORDINATOR_API_VERSION) + .correlationId(0) + 
.clientId(clientId) + .build(); + + encodeProgress = requestHeader.limit(); + + final FindCoordinatorRequestFW findCoordinatorRequest = + findCoordinatorRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .key(delegate.groupId) + .keyType(GROUP_KEY_TYPE) + .build(); + + encodeProgress = findCoordinatorRequest.limit(); + + final int requestId = nextRequestId++; + final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; + + requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit()) + .length(requestSize) + .apiKey(requestHeader.apiKey()) + .apiVersion(requestHeader.apiVersion()) + .correlationId(requestId) + .clientId(requestHeader.clientId().asString()) + .build(); + + doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); + + decoder = decodeFindCoordinatorResponse; + } + + private void encodeNetwork( + long traceId, + long authorization, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + final int maxLength = limit - offset; + final int initialWin = initialMax - (int)(initialSeq - initialAck); + final int length = Math.max(Math.min(initialWin - initialPad, maxLength), 0); + + if (length > 0) + { + final int reserved = length + initialPad; + + doData(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_EXTENSION); + + initialSeq += reserved; + + assert initialAck <= initialSeq; + } + + final int remaining = maxLength - length; + if (remaining > 0) + { + if (encodeSlot == NO_SLOT) + { + encodeSlot = encodePool.acquire(initialId); + } + + if (encodeSlot == NO_SLOT) + { + onError(traceId); + } + else + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(0, buffer, offset + length, remaining); + encodeSlotOffset = remaining; + } + } + else + { + cleanupEncodeSlotIfNecessary(); + } + } + + private void 
decodeNetwork( + long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int limit) + { + KafkaGroupClusterClientDecoder previous = null; + int progress = offset; + while (progress <= limit && previous != decoder) + { + previous = decoder; + progress = decoder.decode(this, traceId, authorization, budgetId, reserved, buffer, offset, progress, limit); + } + + if (progress < limit) + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = decodePool.acquire(initialId); + } + + if (decodeSlot == NO_SLOT) + { + onError(traceId); + } + else + { + final MutableDirectBuffer decodeBuffer = decodePool.buffer(decodeSlot); + decodeBuffer.putBytes(0, buffer, progress, limit - progress); + decodeSlotOffset = limit - progress; + decodeSlotReserved = (limit - progress) * reserved / (limit - offset); + } + + doNetworkWindow(traceId, budgetId, decodeSlotOffset, 0, replyMax); + } + else + { + cleanupDecodeSlotIfNecessary(); + + if (reserved > 0) + { + doNetworkWindow(traceId, budgetId, 0, 0, replyMax); + } + } + } + + @Override + protected void doDecodeSaslHandshakeResponse( + long traceId) + { + decoder = decodeClusterSaslHandshakeResponse; + } + + @Override + protected void doDecodeSaslHandshake( + long traceId) + { + decoder = decodeClusterSaslHandshake; + } + + @Override + protected void doDecodeSaslHandshakeMechanisms( + long traceId) + { + decoder = decodeClusterSaslHandshakeMechanisms; + } + + @Override + protected void doDecodeSaslHandshakeMechansim( + long traceId) + { + decoder = decodeClusterSaslHandshakeMechanism; + } + + @Override + protected void doDecodeSaslAuthenticateResponse( + long traceId) + { + decoder = decodeClusterSaslAuthenticateResponse; + } + + @Override + protected void doDecodeSaslAuthenticate( + long traceId) + { + decoder = decodeClusterSaslAuthenticate; + } + + @Override + protected void onDecodeSaslHandshakeResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { 
+ case ERROR_NONE: + encoder = encodeSaslAuthenticateRequest; + decoder = decodeClusterSaslAuthenticateResponse; + break; + default: + delegate.cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + @Override + protected void onDecodeSaslAuthenticateResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + encoder = encodeFindCoordinatorRequest; + decoder = decodeFindCoordinatorResponse; + break; + default: + delegate.cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + @Override + protected void onDecodeSaslResponse( + long traceId) + { + nextResponseId++; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onCoordinatorNotAvailable( + long traceId, + long authorization) + { + nextResponseId++; + + encoder = encodeFindCoordinatorRequest; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onFindCoordinator( + long traceId, + long authorization, + String16FW host, + int port) + { + nextResponseId++; + + delegate.coordinatorClient.doNetworkBeginIfNecessary(traceId, authorization, 0, host, port); + + cleanupNetwork(traceId, authorization); + } + + private void cleanupNetwork( + long traceId, + long authorization) + { + replySeq = 0; + replyAck = 0; + + doNetworkEnd(traceId, authorization); + doNetworkReset(traceId); + } + + private void onError( + long traceId) + { + doNetworkAbort(traceId); + doNetworkReset(traceId); + + delegate.cleanupApplication(traceId, EMPTY_OCTETS); + } + + private void cleanupDecodeSlotIfNecessary() + { + if (decodeSlot != NO_SLOT) + { + decodePool.release(decodeSlot); + decodeSlot = NO_SLOT; + decodeSlotOffset = 0; + decodeSlotReserved = 0; + } + } + + private void cleanupEncodeSlotIfNecessary() + { + if (encodeSlot != NO_SLOT) + { + encodePool.release(encodeSlot); + encodeSlot = NO_SLOT; + 
encodeSlotOffset = 0; + encodeSlotTraceId = 0; + } + } + } + + private final class CoordinatorClient extends KafkaSaslClient + { + private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest; + private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest; + private final LongLongConsumer encodeJoinGroupRequest = this::doEncodeJoinGroupRequest; + private final LongLongConsumer encodeSyncGroupRequest = this::doEncodeSyncGroupRequest; + private final LongLongConsumer encodeHeartbeatRequest = this::doEncodeHeartbeatRequest; + private final LongLongConsumer encodeLeaveGroupRequest = this::doEncodeLeaveGroupRequest; + private final List members; + private final KafkaGroupStream delegate; + + private MessageConsumer network; + + private int state; + private long authorization; + + private long initialSeq; + private long initialAck; + private int initialMax; + private int initialPad; + private long initialBudgetId; + + private long replySeq; + private long replyAck; + private int replyMax; + + private int encodeSlot = NO_SLOT; + private int encodeSlotOffset; + private long encodeSlotTraceId; + + private int decodeSlot = NO_SLOT; + private int decodeSlotOffset; + private int decodeSlotReserved; + + private int nextResponseId; + private long heartbeatRequestId = NO_CANCEL_ID; + + private String leader; + + private int generationId; + private KafkaGroupCoordinatorClientDecoder decoder; + private LongLongConsumer encoder; + private OctetsFW assignment = EMPTY_OCTETS; + + CoordinatorClient( + long originId, + long routedId, + KafkaSaslConfig sasl, + KafkaGroupStream delegate) + { + super(sasl, originId, routedId); + + this.encoder = sasl != null ? 
encodeSaslHandshakeRequest : encodeJoinGroupRequest; + this.delegate = delegate; + this.decoder = decodeCoordinatorReject; + this.members = new ArrayList<>(); + } + + private void onNetwork( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onNetworkBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onNetworkData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onNetworkEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onNetworkAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onNetworkReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onNetworkWindow(window); + break; + case SignalFW.TYPE_ID: + final SignalFW signal = signalRO.wrap(buffer, index, index + length); + onNetworkSignal(signal); + break; + default: + break; + } + } + + private void onNetworkBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + authorization = begin.authorization(); + state = KafkaState.openingReply(state); + + doNetworkWindow(traceId, 0L, 0, 0, decodePool.slotCapacity()); + } + + private void onNetworkData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long budgetId = data.budgetId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + data.reserved(); + authorization = data.authorization(); + + assert replyAck <= replySeq; + + if (replySeq > replyAck + replyMax) + { + onError(traceId); + } + else + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = 
decodePool.acquire(initialId); + } + + if (decodeSlot == NO_SLOT) + { + onError(traceId); + } + else + { + final OctetsFW payload = data.payload(); + int reserved = data.reserved(); + int offset = payload.offset(); + int limit = payload.limit(); + + final MutableDirectBuffer buffer = decodePool.buffer(decodeSlot); + buffer.putBytes(decodeSlotOffset, payload.buffer(), offset, limit - offset); + decodeSlotOffset += limit - offset; + decodeSlotReserved += reserved; + + offset = 0; + limit = decodeSlotOffset; + reserved = decodeSlotReserved; + + decodeNetwork(traceId, authorization, budgetId, reserved, buffer, offset, limit); + } + } + } + + private void onNetworkEnd( + EndFW end) + { + final long traceId = end.traceId(); + + state = KafkaState.closedReply(state); + + cleanupDecodeSlotIfNecessary(); + + if (!delegate.isApplicationReplyOpen()) + { + onError(traceId); + } + } + + private void onNetworkAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedReply(state); + + onError(traceId); + } + + private void onNetworkReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + onError(traceId); + } + + private void onNetworkWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= initialSeq; + assert acknowledge >= initialAck; + assert maximum + acknowledge >= initialMax + initialAck; + + this.initialAck = acknowledge; + this.initialMax = maximum; + this.initialPad = padding; + this.initialBudgetId = budgetId; + + assert initialAck <= initialSeq; + + this.authorization = window.authorization(); + + state = KafkaState.openedInitial(state); + + if (encodeSlot != NO_SLOT) + { + final 
MutableDirectBuffer buffer = encodePool.buffer(encodeSlot); + final int limit = encodeSlotOffset; + + encodeNetwork(encodeSlotTraceId, authorization, budgetId, buffer, 0, limit); + } + + doEncodeRequestIfNecessary(traceId, budgetId); + } + + private void onNetworkSignal( + SignalFW signal) + { + final long traceId = signal.traceId(); + final int signalId = signal.signalId(); + + if (signalId == SIGNAL_NEXT_REQUEST) + { + doEncodeRequestIfNecessary(traceId, initialBudgetId); + } + } + + private void doNetworkBeginIfNecessary( + long traceId, + long authorization, + long affinity, + String16FW host, + int port) + { + if (KafkaState.closed(state)) + { + replyAck = 0; + replySeq = 0; + state = 0; + } + + if (!KafkaState.initialOpening(state)) + { + doNetworkBegin(traceId, authorization, affinity, host, port); + } + } + + private void doNetworkBegin( + long traceId, + long authorization, + long affinity, + String16FW host, + int port) + { + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + + state = KafkaState.openingInitial(state); + + Consumer extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) + .typeId(proxyTypeId) + .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) + .source("0.0.0.0") + .destination(host) + .sourcePort(0) + .destinationPort(port))) + .build() + .sizeof()); + + network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, extension); + } + + @Override + protected void doNetworkData( + long traceId, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + if (encodeSlot != NO_SLOT) + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(encodeSlotOffset, buffer, offset, limit - offset); + encodeSlotOffset += limit - offset; + encodeSlotTraceId = traceId; + + buffer = encodeBuffer; + offset = 0; + limit = encodeSlotOffset; + } + + 
encodeNetwork(traceId, authorization, budgetId, buffer, offset, limit); + } + + private void doNetworkEnd( + long traceId, + long authorization) + { + if (!KafkaState.initialClosed(state)) + { + state = KafkaState.closedInitial(state); + + doEnd(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + } + + cleanupEncodeSlotIfNecessary(); + + } + + private void doNetworkAbort( + long traceId) + { + if (KafkaState.initialOpened(state) && + !KafkaState.initialClosed(state)) + { + doAbort(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + state = KafkaState.closedInitial(state); + } + + cleanupEncodeSlotIfNecessary(); + } + + private void doNetworkReset( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_OCTETS); + state = KafkaState.closedReply(state); + } + + cleanupDecodeSlotIfNecessary(); + } + + private void doNetworkWindow( + long traceId, + long budgetId, + int minReplyNoAck, + int minReplyPad, + int minReplyMax) + { + final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck); + + if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state)) + { + replyAck = newReplyAck; + assert replyAck <= replySeq; + + replyMax = minReplyMax; + + state = KafkaState.openedReply(state); + + doWindow(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, minReplyPad); + } + } + + private void doEncodeRequestIfNecessary( + long traceId, + long budgetId) + { + if (nextRequestId == nextResponseId) + { + encoder.accept(traceId, budgetId); + } + } + + private void doEncodeJoinGroupRequest( + long traceId, + long budgetId) + { + final MutableDirectBuffer encodeBuffer = writeBuffer; + final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; + final 
int encodeLimit = encodeBuffer.capacity(); + + int encodeProgress = encodeOffset; + + final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .length(0) + .apiKey(JOIN_GROUP_API_KEY) + .apiVersion(JOIN_GROUP_VERSION) + .correlationId(0) + .clientId(clientId) + .build(); + + encodeProgress = requestHeader.limit(); + + final String memberId = delegate.groupMembership.memberIds.getOrDefault(delegate.groupId, UNKNOWN_MEMBER_ID); + + final JoinGroupRequestFW joinGroupRequest = + joinGroupRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .groupId(delegate.groupId) + .sessionTimeoutMillis(delegate.timeout) + .rebalanceTimeoutMillis((int) rebalanceTimeout.toMillis()) + .memberId(memberId) + .groupInstanceId(delegate.groupMembership.instanceId) + .protocolType("consumer") + .protocolCount(1) + .build(); + + encodeProgress = joinGroupRequest.limit(); + + final ProtocolMetadataFW protocolMetadata = + protocolMetadataRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .name(delegate.protocol) + .metadata(EMPTY_OCTETS) + .build(); + + encodeProgress = protocolMetadata.limit(); + + final int requestId = nextRequestId++; + final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; + + requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit()) + .length(requestSize) + .apiKey(requestHeader.apiKey()) + .apiVersion(requestHeader.apiVersion()) + .correlationId(requestId) + .clientId(requestHeader.clientId().asString()) + .build(); + + doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); + + decoder = decodeJoinGroupResponse; + + delegate.doApplicationBeginIfNecessary(traceId, authorization); + } + + private void doEncodeSyncGroupRequest( + long traceId, + long budgetId) + { + final MutableDirectBuffer encodeBuffer = writeBuffer; + final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; + final int encodeLimit = encodeBuffer.capacity(); + + int 
encodeProgress = encodeOffset; + + final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .length(0) + .apiKey(SYNC_GROUP_API_KEY) + .apiVersion(SYNC_GROUP_VERSION) + .correlationId(0) + .clientId(clientId) + .build(); + + encodeProgress = requestHeader.limit(); + + final String memberId = delegate.groupMembership.memberIds.get(delegate.groupId); + + final SyncGroupRequestFW syncGroupRequest = + syncGroupRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .groupId(delegate.groupId) + .generatedId(generationId) + .memberId(memberId) + .groupInstanceId(delegate.groupMembership.instanceId) + .assignmentCount(members.size()) + .build(); + + encodeProgress = syncGroupRequest.limit(); + + for (int i = 0; i < members.size(); i++) + { + final AssignmentFW groupAssignment = + assignmentRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .memberId(members.get(i)) + .value(assignment) + .build(); + + encodeProgress = groupAssignment.limit(); + } + + final int requestId = nextRequestId++; + final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; + + requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit()) + .length(requestSize) + .apiKey(requestHeader.apiKey()) + .apiVersion(requestHeader.apiVersion()) + .correlationId(requestId) + .clientId(requestHeader.clientId().asString()) + .build(); + + doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); + + decoder = decodeSyncGroupResponse; + } + + private void doEncodeHeartbeatRequest( + long traceId, + long budgetId) + { + final MutableDirectBuffer encodeBuffer = writeBuffer; + final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; + final int encodeLimit = encodeBuffer.capacity(); + + int encodeProgress = encodeOffset; + + final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .length(0) + .apiKey(HEARTBEAT_API_KEY) + 
.apiVersion(HEARTBEAT_VERSION) + .correlationId(0) + .clientId(clientId) + .build(); + + encodeProgress = requestHeader.limit(); + + final String memberId = delegate.groupMembership.memberIds.get(delegate.groupId); + + final HeartbeatRequestFW heartbeatRequest = + heartbeatRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .groupId(delegate.groupId) + .generatedId(generationId) + .memberId(memberId) + .groupInstanceId(delegate.groupMembership.instanceId) + .build(); + + encodeProgress = heartbeatRequest.limit(); + + final int requestId = nextRequestId++; + final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; + + requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit()) + .length(requestSize) + .apiKey(requestHeader.apiKey()) + .apiVersion(requestHeader.apiVersion()) + .correlationId(requestId) + .clientId(requestHeader.clientId().asString()) + .build(); + + doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); + + decoder = decodeHeartbeatResponse; + } + + private void doEncodeLeaveGroupRequest( + long traceId, + long budgetId) + { + final MutableDirectBuffer encodeBuffer = writeBuffer; + final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; + final int encodeLimit = encodeBuffer.capacity(); + + int encodeProgress = encodeOffset; + + final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .length(0) + .apiKey(LEAVE_GROUP_API_KEY) + .apiVersion(LEAVE_GROUP_VERSION) + .correlationId(0) + .clientId(clientId) + .build(); + + encodeProgress = requestHeader.limit(); + + final LeaveGroupRequestFW leaveGroupRequest = + leaveGroupRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .groupId(delegate.groupId) + .memberCount(1) + .build(); + + encodeProgress = leaveGroupRequest.limit(); + + final String memberId = delegate.groupMembership.memberIds.get(delegate.groupId); + + final LeaveMemberFW leaveMember = 
leaveMemberRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .memberId(memberId) + .groupInstanceId(delegate.groupMembership.instanceId) + .build(); + + encodeProgress = leaveMember.limit(); + + final int requestId = nextRequestId++; + final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; + + requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit()) + .length(requestSize) + .apiKey(requestHeader.apiKey()) + .apiVersion(requestHeader.apiVersion()) + .correlationId(requestId) + .clientId(requestHeader.clientId().asString()) + .build(); + + doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); + + decoder = decodeLeaveGroupResponse; + } + + private void doSyncRequest( + long traceId, + long budgetId, + OctetsFW assignment) + { + this.assignment = assignment; + doEncodeSyncGroupRequest(traceId, budgetId); + } + + private void doHeartbeat( + long traceId) + { + if (heartbeatRequestId != NO_CANCEL_ID) + { + signaler.cancel(heartbeatRequestId); + heartbeatRequestId = NO_CANCEL_ID; + } + + encoder = encodeHeartbeatRequest; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void doLeaveGroupRequest( + long traceId) + { + if (heartbeatRequestId != NO_CANCEL_ID) + { + signaler.cancel(heartbeatRequestId); + heartbeatRequestId = NO_CANCEL_ID; + } + + encoder = encodeLeaveGroupRequest; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void encodeNetwork( + long traceId, + long authorization, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + final int maxLength = limit - offset; + final int initialWin = initialMax - (int)(initialSeq - initialAck); + final int length = Math.max(Math.min(initialWin - initialPad, maxLength), 0); + + if (length > 0) + { + final int reserved = length + initialPad; + + doData(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + 
traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_EXTENSION); + + initialSeq += reserved; + + assert initialAck <= initialSeq; + } + + final int remaining = maxLength - length; + if (remaining > 0) + { + if (encodeSlot == NO_SLOT) + { + encodeSlot = encodePool.acquire(initialId); + } + + if (encodeSlot == NO_SLOT) + { + onError(traceId); + } + else + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(0, buffer, offset + length, remaining); + encodeSlotOffset = remaining; + } + } + else + { + cleanupEncodeSlotIfNecessary(); + } + } + + private void decodeNetwork( + long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int limit) + { + KafkaGroupCoordinatorClientDecoder previous = null; + int progress = offset; + while (progress <= limit && previous != decoder) + { + previous = decoder; + progress = decoder.decode(this, traceId, authorization, budgetId, reserved, buffer, offset, progress, limit); + } + + if (progress < limit) + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = decodePool.acquire(initialId); + } + + if (decodeSlot == NO_SLOT) + { + onError(traceId); + } + else + { + final MutableDirectBuffer decodeBuffer = decodePool.buffer(decodeSlot); + decodeBuffer.putBytes(0, buffer, progress, limit - progress); + decodeSlotOffset = limit - progress; + decodeSlotReserved = (limit - progress) * reserved / (limit - offset); + } + + doNetworkWindow(traceId, budgetId, decodeSlotOffset, 0, replyMax); + } + else + { + cleanupDecodeSlotIfNecessary(); + + if (reserved > 0) + { + doNetworkWindow(traceId, budgetId, 0, 0, replyMax); + } + } + } + + @Override + protected void doDecodeSaslHandshakeResponse( + long traceId) + { + decoder = decodeCoordinatorSaslHandshakeResponse; + } + + @Override + protected void doDecodeSaslHandshake( + long traceId) + { + decoder = decodeCoordinatorSaslHandshake; + } + + @Override + protected void 
doDecodeSaslHandshakeMechanisms( + long traceId) + { + decoder = decodeCoordinatorSaslHandshakeMechanisms; + } + + @Override + protected void doDecodeSaslHandshakeMechansim( + long traceId) + { + decoder = decodeCoordinatorSaslHandshakeMechanism; + } + + @Override + protected void doDecodeSaslAuthenticateResponse( + long traceId) + { + decoder = decodeCoordinatorSaslAuthenticateResponse; + } + + @Override + protected void doDecodeSaslAuthenticate( + long traceId) + { + decoder = decodeCoordinatorSaslAuthenticate; + } + + @Override + protected void onDecodeSaslHandshakeResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + encoder = encodeSaslAuthenticateRequest; + decoder = decodeCoordinatorSaslAuthenticateResponse; + break; + default: + delegate.cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + @Override + protected void onDecodeSaslAuthenticateResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + encoder = encodeJoinGroupRequest; + decoder = decodeJoinGroupResponse; + break; + default: + delegate.cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + @Override + protected void onDecodeSaslResponse( + long traceId) + { + nextResponseId++; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onNotCoordinatorError( + long traceId, + long authorization) + { + nextResponseId++; + + cleanupNetwork(traceId, authorization); + + delegate.onNotCoordinatorError(traceId, authorization); + } + + private void onJoinGroupUnknownMemberError( + long traceId, + long authorization) + { + nextResponseId++; + + delegate.groupMembership.memberIds.put(delegate.groupId, UNKNOWN_MEMBER_ID); + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onJoinGroupMemberIdError( + long traceId, + 
long authorization, + String memberId) + { + nextResponseId++; + + delegate.groupMembership.memberIds.put(delegate.groupId, memberId); + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onJoinGroupResponse( + long traceId, + long authorization, + String leader, + String memberId, + int error) + { + nextResponseId++; + + this.leader = leader; + + delegate.groupMembership.memberIds.put(delegate.groupId, memberId); + + encoder = encodeSyncGroupRequest; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onSynGroupRebalance( + long traceId, + long authorization) + { + nextResponseId++; + + encoder = encodeJoinGroupRequest; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onSyncGroupResponse( + long traceId, + long authorization, + OctetsFW assignment) + { + nextResponseId++; + + final String memberId = delegate.groupMembership.memberIds.get(delegate.groupId); + + delegate.doApplicationData(traceId, authorization, assignment, + ex -> ex.set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .group(g -> g.leaderId(leader).memberId(memberId)) + .build() + .sizeof())); + + if (heartbeatRequestId != NO_CANCEL_ID) + { + encoder = encodeHeartbeatRequest; + + heartbeatRequestId = signaler.signalAt(currentTimeMillis() + delegate.timeout / 2, + originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + } + + private void onHeartbeatResponse( + long traceId, + long authorization) + { + nextResponseId++; + + if (heartbeatRequestId != NO_CANCEL_ID) + { + signaler.cancel(heartbeatRequestId); + heartbeatRequestId = NO_CANCEL_ID; + } + + encoder = encodeHeartbeatRequest; + + heartbeatRequestId = signaler.signalAt(currentTimeMillis() + delegate.timeout / 2, + originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onLeaveGroupResponse( + long traceId, + long authorization) + { + doNetworkEnd(traceId, 
authorization); + doNetworkReset(traceId); + + delegate.doApplicationEnd(traceId); + delegate.doApplicationResetIfNecessary(traceId, EMPTY_OCTETS); + } + + private void onRebalanceError( + long traceId, + long authorization) + { + nextResponseId++; + + encoder = encodeJoinGroupRequest; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void cleanupNetwork( + long traceId, + long authorization) + { + doNetworkEnd(traceId, authorization); + doNetworkReset(traceId); + } + + private void onError( + long traceId) + { + doNetworkAbort(traceId); + doNetworkReset(traceId); + + delegate.cleanupApplication(traceId, EMPTY_OCTETS); + } + + private void cleanupDecodeSlotIfNecessary() + { + if (decodeSlot != NO_SLOT) + { + decodePool.release(decodeSlot); + decodeSlot = NO_SLOT; + decodeSlotOffset = 0; + decodeSlotReserved = 0; + } + } + + private void cleanupEncodeSlotIfNecessary() + { + if (encodeSlot != NO_SLOT) + { + encodePool.release(encodeSlot); + encodeSlot = NO_SLOT; + encodeSlotOffset = 0; + encodeSlotTraceId = 0; + } + } + } + + private final class GroupMembership + { + public final String instanceId; + public final Map memberIds; + + GroupMembership( + String instanceId) + { + this.instanceId = instanceId; + this.memberIds = new Object2ObjectHashMap<>(); + } + } +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientSaslHandshaker.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientSaslHandshaker.java index 00ed048dbe..13337e1505 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientSaslHandshaker.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientSaslHandshaker.java @@ -113,8 +113,8 @@ public abstract class KafkaSaslClient protected final KafkaSaslConfig sasl; protected final long 
originId; protected final long routedId; - protected final long initialId; - protected final long replyId; + protected long initialId; + protected long replyId; protected int nextRequestId; diff --git a/runtime/binding-kafka/src/main/zilla/protocol.idl b/runtime/binding-kafka/src/main/zilla/protocol.idl index d3267cb430..02cbe07d4b 100644 --- a/runtime/binding-kafka/src/main/zilla/protocol.idl +++ b/runtime/binding-kafka/src/main/zilla/protocol.idl @@ -336,7 +336,145 @@ scope protocol struct ProduceResponseTrailer { - int32 thottleTimeMillis; + int32 throttleTimeMillis; + } + } + + scope group + { + struct FindCoordinatorRequest + { + string16 key; + int8 keyType; + } + + struct FindCoordinatorResponse + { + int32 correlationId; + int32 throttleTimeMillis; + int16 errorCode; + string16 errorMessage = null; + int32 nodeId; + string16 host; + int32 port; + } + + struct JoinGroupRequest + { + string16 groupId; + int32 sessionTimeoutMillis; + int32 rebalanceTimeoutMillis; + string16 memberId; + string16 groupInstanceId = null; + string16 protocolType; + int32 protocolCount; + } + + struct ProtocolMetadata + { + string16 name; + uint32 metadataLength; + octets[metadataLength] metadata; + } + + struct RangeProtocol + { + int32 version; + string16 topic; + int32 partitionCount; + } + + struct JoinGroupResponse + { + int32 correlationId; + int32 throttleTimeMillis; + int16 errorCode; + int32 generatedId; + string16 protocolName; + string16 leader; + string16 memberId; + int32 memberCount; + } + + struct MemberMetadata + { + string16 memberId; + string16 groupInstanceId = null; + uint32 length; + octets[length] metadata; + } + + struct SyncGroupRequest + { + string16 groupId; + int32 generatedId; + string16 memberId; + string16 groupInstanceId = null; + int32 assignmentCount; + } + + struct Assignment + { + string16 memberId; + uint32 length; + octets[length] value; + } + + struct TopicPartition + { + int32 version; + string16 topic; + int32 partitionCount; + } + + struct 
Partition + { + int32 partitionId; + int32 offsetId; + } + + struct SyncGroupResponse + { + int32 correlationId; + int32 throttleTimeMillis; + int16 errorCode; + uint32 assignmentLength; + octets[assignmentLength] assignment; + } + + struct HeartbeatRequest + { + string16 groupId; + int32 generatedId; + string16 memberId; + string16 groupInstanceId = null; + } + + struct HeartbeatResponse + { + int32 correlationId; + int32 throttleTimeMillis; + int16 errorCode; + } + + struct LeaveGroupRequest + { + string16 groupId; + int32 memberCount; + } + + struct LeaveGroupResponse + { + int32 correlationId; + int32 throttleTimeMillis; + int16 errorCode; + int32 memberCount; + } + + struct LeaveMember + { + string16 memberId; + string16 groupInstanceId = null; } } diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapterTest.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapterTest.java index 05c8731c2f..d28a654c15 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapterTest.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapterTest.java @@ -44,23 +44,25 @@ public void shouldReadCondition() { String text = "{" + - "\"topic\": \"test\"" + + "\"topic\": \"test\"," + + "\"groupId\": \"test\"" + "}"; KafkaConditionConfig condition = jsonb.fromJson(text, KafkaConditionConfig.class); assertThat(condition, not(nullValue())); assertThat(condition.topic, equalTo("test")); + assertThat(condition.groupId, equalTo("test")); } @Test public void shouldWriteCondition() { - KafkaConditionConfig condition = new KafkaConditionConfig("test"); + KafkaConditionConfig condition = new KafkaConditionConfig("test", "test"); String text = jsonb.toJson(condition); assertThat(text, not(nullValue())); - 
assertThat(text, equalTo("{\"topic\":\"test\"}")); + assertThat(text, equalTo("{\"topic\":\"test\",\"groupId\":\"test\"}")); } } diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheGroupIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheGroupIT.java new file mode 100644 index 0000000000..e7c19b50aa --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheGroupIT.java @@ -0,0 +1,68 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration.KAFKA_CACHE_SERVER_BOOTSTRAP; +import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration.KAFKA_CACHE_SERVER_RECONNECT_DELAY; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_BUFFER_SLOT_CAPACITY; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.ScriptProperty; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class CacheGroupIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/group"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(10, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) + .configure(KAFKA_CACHE_SERVER_BOOTSTRAP, false) + .configure(KAFKA_CACHE_SERVER_RECONNECT_DELAY, 0) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("app1") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + @Test + @Configuration("cache.yaml") + @Specification({ + "${app}/rebalance.protocol.highlander/client", + "${app}/rebalance.protocol.highlander/server"}) + @ScriptProperty("serverAddress \"zilla://streams/app1\"") + public void 
shouldRebalanceLeader() throws Exception + { + k3po.finish(); + } +} diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java new file mode 100644 index 0000000000..a39c8b872a --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java @@ -0,0 +1,133 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class ClientGroupIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/group"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(15, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/client.sent.write.abort.before.coordinator.response/client", + "${net}/client.sent.write.abort.before.coordinator.response/server"}) + public void shouldHandleClientSentWriteAbortBeforeCoordinatorResponse() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/rebalance.protocol.highlander/client", + "${net}/rebalance.protocol.highlander/server"}) + + public void shouldLeaveGroupOnGroupRebalanceError() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/leader/client", + 
"${net}/coordinator.not.available/server"}) + public void shouldHandleCoordinatorNotAvailableError() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/leader/client", + "${net}/coordinator.reject.invalid.consumer/server"}) + public void shouldHRejectInvalidConsumer() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/leader/client", + "${net}/rebalance.protocol.highlander.unknown.member.id/server"}) + public void shouldRebalanceProtocolHighlanderUnknownMemberId() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/rebalance.protocol.highlander.migrate.leader/client", + "${net}/rebalance.protocol.highlander.migrate.leader/server"}) + public void shouldRebalanceProtocolHighlanderMigrateLeader() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/rebalance.protocol.unknown/client", + "${net}/rebalance.protocol.unknown/server"}) + public void shouldRejectSecondStreamOnUnknownProtocol() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/leader/client", + "${net}/rebalance.sync.group/server"}) + public void shouldHandleRebalanceSyncGroup() throws Exception + { + k3po.finish(); + } +} diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupSaslIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupSaslIT.java new file mode 100644 index 0000000000..789359fb95 --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupSaslIT.java @@ -0,0 +1,63 @@ +/* + * Copyright 2021-2023 Aklivity Inc. 
+ * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class ClientGroupSaslIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", + "io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/group"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(15, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + + @Test + @Configuration("client.options.sasl.plain.yaml") + 
@Specification({ + "${app}/leader/client", + "${net}/leader/server"}) + public void shouldBecomeLeader() throws Exception + { + k3po.finish(); + } +} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json index 2b20c34eef..a27ea5f0a3 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json @@ -11,7 +11,7 @@ { "if": { - "properties": + "properties": { "type": { @@ -218,6 +218,11 @@ { "title": "Topic", "type": "string" + }, + "groupId": + { + "title": "groupId", + "type": "string" } }, "additionalProperties": false diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.before.coordinator.response/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.before.coordinator.response/client.rpt new file mode 100644 index 0000000000..ca416d10ee --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.before.coordinator.response/client.rpt @@ -0,0 +1,32 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +write abort diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.before.coordinator.response/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.before.coordinator.response/server.rpt new file mode 100644 index 0000000000..6d3853b32e --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.before.coordinator.response/server.rpt @@ -0,0 +1,36 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +read aborted diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt new file mode 100644 index 0000000000..ea472ace1c --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt @@ -0,0 +1,41 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +read zilla:data.null + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt new file mode 100644 index 0000000000..178b40bc4d --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt @@ -0,0 +1,43 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt new file mode 100644 index 0000000000..567e4f7725 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -0,0 +1,69 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +read zilla:data.null + +read notify ROUTED_BROKER_SERVER + +write aborted + +connect await ROUTED_BROKER_SERVER "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +read zilla:data.null diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt new file mode 100644 index 0000000000..ff4baeea4a --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -0,0 +1,68 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +write flush + +read abort + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +write flush + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt new file mode 100644 index 0000000000..b7765fa2a4 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt @@ -0,0 +1,54 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +read zilla:data.null + +write advise zilla:flush + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +read zilla:data.null + +write close +read closed diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt new file mode 100644 index 0000000000..4f0f18bb9c --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt @@ -0,0 +1,57 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +write flush + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +write flush + +read closed +write close diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt new file mode 100644 index 0000000000..b8293ceeca --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt @@ -0,0 +1,57 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("unknown") + .timeout(45000) + .build() + .build()} + +connected + + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +read zilla:data.null + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("unknown") + .timeout(45000) + .build() + .build()} + +connect aborted diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt new file mode 100644 index 0000000000..a72a8ba9f8 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt @@ -0,0 +1,45 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("unknown") + .timeout(45000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +write flush + +rejected diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/client.rpt new file mode 100644 index 0000000000..1012892d86 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/client.rpt @@ -0,0 +1,41 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkConnectWindow 8192 + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # "session" coordinator key + [0x00] # coordinator group type + + +write abort +read aborted diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/server.rpt new file mode 100644 index 0000000000..1db15e2632 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/server.rpt @@ -0,0 +1,35 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # "session" coordinator key + [0x00] # coordinator group type + +read aborted +write abort diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt new file mode 100644 index 0000000000..34b66f8209 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt @@ -0,0 +1,150 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property networkConnectWindow 8192 +property instanceId ${kafka:randomBytes(42)} + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +read 54 # size + (int:newRequestId) + 0 # throttle time + 15s # no coordinator available + 32s "The coordinator is not available" # error no coordinator available + -1 # coordinator node + 0s # host + -1 # port + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +read 35 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 4s "none" # error message none + 0 # coordinator node + 9s "localhost" # host + 9092 # port + +write close +read abort + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 105 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 34 # size + (int:newRequestId) + 0 # throttle time + 79s # member id required + -1 # generated id + 0s # protocol name + 0s # leader id + 10s "memberId-1" # consumer member group id + 0 # members + +write 115 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # 
client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 112 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + +write 101 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +read 14 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 0 # assignment + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt new file mode 100644 index 0000000000..960d26d2d4 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt @@ -0,0 +1,140 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property instanceId ${kafka:randomBytes(42)} + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 54 # size + ${newRequestId} + 0 # throttle time + 15s # no coordinator available + 32s "The coordinator is not available" # error no coordinator available + -1 # coordinator node + 0s # host + -1 # port + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 35 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 4s "none" # error message none + 0 # coordinator node + 9s "localhost" # host + 9092 # port + +read closed +write aborted + +accepted + +connected + +read 105 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + + +write 34 # size + ${newRequestId} + 0 # throttle time + 79s # member id required + -1 # generated id + 0s # protocol name + 0s # leader id + 10s "memberId-1" # consumer member group id + 0 # members + +read 115 # size + 11s # join group + 5s # v5 + 
(int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +write 112 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + +read 101 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt new file mode 100644 index 0000000000..bb8ccf51e6 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt @@ -0,0 +1,176 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkConnectWindow 8192 +property instanceId ${kafka:randomBytes(42)} + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +read 35 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 4s "none" # error message none + 0 # coordinator node + 9s "localhost" # host + 9092 # port + +write close +read abort + +read notify ROUTED_BROKER_SERVER_FIRST + +connect await ROUTED_BROKER_SERVER_FIRST + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 105 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 24 # size + (int:newRequestId) + 0 # throttle time + 16s # not a coordinator for a consumer + -1 # generated id + 0s # protocol name + 0s # leader id + 0s # not a coordinator for a consumer + 0 # members + +write close +read abort + 
+read notify ROUTED_BROKER_SERVER_SECOND + +connect await ROUTED_BROKER_SERVER_SECOND + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +read 35 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 4s "none" # error message none + 0 # coordinator node + 9s "localhost" # host + 9092 # port + +write close +read abort + +read notify ROUTED_BROKER_SERVER_THIRD + +connect await ROUTED_BROKER_SERVER_THIRD + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 105 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 112 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + +write 101 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +read 14 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 0 # assignment + diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt new file mode 100644 index 0000000000..6f7f230d1a --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt @@ -0,0 +1,153 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property instanceId ${kafka:randomBytes(42)} + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 35 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 4s "none" # error message none + 0 # coordinator node + 9s "localhost" # host + 9092 # port + +read closed +write aborted + +accepted + +connected + +read 105 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +write 24 # size + ${newRequestId} + 0 # throttle time + 16s # not a coordinator for a consumer + -1 # generated id + 0s # protocol name + 0s # leader id + 0s # not a coordinator for a consumer + 0 # members + +read closed +write aborted + +accepted + +connected + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 35 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 4s "none" # error message none + 0 # coordinator node + 9s "localhost" # host + 9092 # port + +read closed +write aborted + +accepted + +connected + +read 105 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +write 112 # size + ${newRequestId} + 0 # throttle 
time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + +read 101 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt new file mode 100644 index 0000000000..71cb8605e4 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -0,0 +1,195 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property networkConnectWindow 8192 +property instanceId ${kafka:randomBytes(42)} + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "session" coordinator key + [0x00] # coordinator group type + +read 35 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 # port + +write close +read abort + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 105 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 112 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + +write 101 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +read 14 # size + 
(int:newRequestId) + 0 # throttle time + 0s # no error + 0 # assignment + +write close +read abort + +read notify ROUTED_BROKER_SERVER_SECOND + +connect await ROUTED_BROKER_SERVER_SECOND "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "session" coordinator key + [0x00] # coordinator group type + +read 35 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 # port + +write close +read abort + +read notify ROUTED_BROKER_SERVER_THIRD + +connect await ROUTED_BROKER_SERVER_THIRD "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 115 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 112 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + +write 101 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +read 14 # size + (int:newRequestId) + 0 # throttle time + 0s 
# no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt new file mode 100644 index 0000000000..efaee4ce49 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -0,0 +1,175 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property instanceId ${kafka:randomBytes(42)} + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 35 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 # port + +read closed +write aborted + +accepted + +connected + +read 105 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +write 112 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + +read 101 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 0 # assignment + +read closed +write aborted + +accepted + +connected + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 35 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # 
coordinator node + 9s "localhost" # host + 9092 # port + +read closed +write aborted + +accepted + +connected + +read 115 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +write 112 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + +read 101 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt new file mode 100644 index 0000000000..b956d5e1f1 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt @@ -0,0 +1,132 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkConnectWindow 8192 +property instanceId ${kafka:randomBytes(42)} + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "session" coordinator key + [0x00] # coordinator group type + +read 35 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 # port + +write close +read abort + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 105 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 24 # size + (int:newRequestId) + 0 # throttle time + 25s # unknown member id 
+ -1 # generated id + 0s # protocol name + 0s # leader id + 0s # consumer member group id + 0 # members + +write 105 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 112 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + +write 101 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +read 14 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt new file mode 100644 index 0000000000..06ea422a7a --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt @@ -0,0 +1,123 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property instanceId ${kafka:randomBytes(42)} + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 35 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 # port + +read closed +write aborted + +accepted + +connected + +read 105 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + + +write 24 # size + ${newRequestId} + 0 # throttle time + 25s # unknown member id + -1 # generated id + 0s # protocol name + 0s # leader id + 0s # consumer member group id + 0 # members + +read 105 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group 
protocol + 10s "highlander" # protocol name + 0 # metadata + +write 112 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + +read 101 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt new file mode 100644 index 0000000000..5165d45fd2 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt @@ -0,0 +1,221 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property networkConnectWindow 8192 +property instanceId ${kafka:randomBytes(42)} + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "session" coordinator key + [0x00] # coordinator group type + +read 35 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 # port + +write close +read abort + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 105 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 34 # size + (int:newRequestId) + 0 # throttle time + 79s # member id required + -1 # generated id + 0s # protocol name + 0s # leader id + 10s "memberId-1" # consumer member group id + 0 # members + +write 115 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 112 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 3 # generated id + 
10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + +write 101 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +read 14 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 0 # assignment + +write 81 # size + 12s # heartbeat + 3s # v3 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + +read 10 # size + (int:newRequestId) + 0 # throttle time + 27s # REBALANCE_IN_PROGRESS + +write 115 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + + +read 170 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 2 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + 10s "memberId-2" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + +write 117 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 2 # assignments + 10s "memberId-1" # consumer member 
group id + 0 # metadata + 10s "memberId-2" # consumer member group id + 0 # metadata + +read 14 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 0 # assignment + +write 81 # size + 13s # leave group + 3s # v3 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # consumer group + 1 # assignments + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + +read 70 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 1 # assignments + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + +write close +read abort diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt new file mode 100644 index 0000000000..077b3cffc5 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt @@ -0,0 +1,212 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property instanceId ${kafka:randomBytes(42)} + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 35 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 # port + +read closed +write aborted + +accepted + +connected + +read 105 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + + +write 34 # size + ${newRequestId} + 0 # throttle time + 79s # member id required + -1 # generated id + 0s # protocol name + 0s # leader id + 10s "memberId-1" # consumer member group id + 0 # members + +read 115 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +write 112 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + +read 101 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # 
consumer member group id + 42s [0..42] # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 0 # assignment + +read 81 # size + 12s # heartbeat + 3s # v3 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + +write 10 # size + ${newRequestId} + 0 # throttle time + 27s # REBALANCE_IN_PROGRESS + +read 115 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + + +write 170 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 2 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + 10s "memberId-2" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + +read 117 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 2 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + 10s "memberId-2" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 0 # assignment + +read 81 # size + 13s # leave group + 3s # v3 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 1 # assignments + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 
+write 70 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 1 # assignments + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + +read closed +write aborted diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt new file mode 100644 index 0000000000..03968c7817 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt @@ -0,0 +1,108 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property networkConnectWindow 8192 +property instanceId ${kafka:randomBytes(42)} + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "session" coordinator key + [0x00] # coordinator group type + +read 35 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 #port + +write close +read abort + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 102 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 7s "unknown" # protocol name + 0 # metadata + +read 109 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 3 # generated id + 7s "unknown" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + +write 101 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +read 14 # size + (int:newRequestId) + 0 # 
throttle time + 0s # no error + 0 # assignment + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt new file mode 100644 index 0000000000..aecd6f1d65 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt @@ -0,0 +1,97 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property instanceId ${kafka:randomBytes(42)} + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 35 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 # port + +read closed +write aborted + +accepted + +connected + +read 102 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 7s "unknown" # protocol name + 0 # metadata + +write 109 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 3 # generated id + 7s "unknown" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + +read 101 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt new file mode 100644 index 
0000000000..8d198b5a1f --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt @@ -0,0 +1,154 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkConnectWindow 8192 +property instanceId ${kafka:randomBytes(42)} + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "session" coordinator key + [0x00] # coordinator group type + +read 35 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 # port + +write close +read abort + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 105 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 
0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 112 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + +write 101 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +read 14 # size + (int:newRequestId) + 0 # throttle time + 27s # rebalance + 0 # assignment + +write 115 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 112 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 4 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + +write 101 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 4 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +read 14 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 0 # assignment diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt new file mode 100644 index 0000000000..7543f130d7 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt @@ -0,0 +1,144 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property instanceId ${kafka:randomBytes(42)} + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 35 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 # port + +read closed +write aborted + +accepted + +connected + +read 105 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +write 112 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + +read 101 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 27s # rebalance + 0 # assignment + +read 115 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # 
protocol name + 0 # metadata + +write 112 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 4 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + +read 101 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 4 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt new file mode 100644 index 0000000000..a12b220d76 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt @@ -0,0 +1,165 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property networkConnectWindow 8192 +property instanceId ${kafka:randomBytes(42)} + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 17 # size + 17s # sasl.handshake + 1s # v1 + ${newRequestId} + -1s # no client id + 5s "PLAIN" # mechanism + +read 17 # size + ${newRequestId} + 0s # no error + 1 # mechanisms + 5s "PLAIN" # PLAIN + +write 32 # size + 36s # sasl.authenticate + 1s # v1 + ${newRequestId} + -1s # no client id + 18 + [0x00] "username" # authentication bytes + [0x00] "password" + +read 20 # size + ${newRequestId} + 0s # no error + -1 + -1s # authentication bytes + 0L # session lifetime + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "session" coordinator key + [0x00] # coordinator group type + +read 35 # size + (int:newRequestId) + 0 #throttle time + 0s #no error + 4s "none" #error message none + 1 #coordinator node + 9s "localhost" #host + 9092 #port + +write close +read abort + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 17 # size + 17s # sasl.handshake + 1s # v1 + ${newRequestId} + -1s # no client id + 5s "PLAIN" # mechanism + +read 17 # size + ${newRequestId} + 0s # no error + 1 # mechanisms + 5s "PLAIN" # PLAIN + +write 32 # size + 36s # sasl.authenticate + 1s # v1 + ${newRequestId} + -1s # no client id + 18 + [0x00] "username" # authentication bytes + [0x00] "password" + +read 20 # size + ${newRequestId} + 0s # no error + -1 + -1s # authentication bytes + 0L # session lifetime + +write 105 # size + 11s # join group + 5s # v5 + 
${newRequestId} + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 112 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + +write 101 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +read 14 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt new file mode 100644 index 0000000000..7d5eea0df9 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt @@ -0,0 +1,157 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property instanceId ${kafka:randomBytes(42)} + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 17 # size + 17s # sasl.handshake + 1s # v1 + (int:requestId) + -1s # no client id + 5s "PLAIN" # mechanism + +write 17 # size + ${requestId} + 0s # no error + 1 # mechanisms + 5s "PLAIN" # PLAIN + +read 32 # size + 36s # sasl.authenticate + 1s # v1 + (int:requestId) + -1s # no client id + 18 + [0x00] "username" # authentication bytes + [0x00] "password" + +write 20 # size + ${requestId} + 0s # no error + -1 + -1s # authentication bytes + 0L # session lifetime + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 35 # size + ${newRequestId} + 0 #throttle time + 0s #no error + 4s "none" #error message none + 1 #coordinator node + 9s "localhost" #host + 9092 #port + +read closed +write aborted + +accepted + +connected + +read 17 # size + 17s # sasl.handshake + 1s # v1 + (int:requestId) + -1s # no client id + 5s "PLAIN" # mechanism + +write 17 # size + ${requestId} + 0s # no error + 1 # mechanisms + 5s "PLAIN" # PLAIN + +read 32 # size + 36s # sasl.authenticate + 1s # v1 + (int:requestId) + -1s # no client id + 18 + [0x00] "username" # authentication bytes + [0x00] "password" + +write 20 # size + ${requestId} + 0s # no error + -1 + -1s # authentication bytes + 0L # session lifetime + +read 105 # size + 11s # join group + 5s # v5 
+ (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + + + +write 112 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + +read 101 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java new file mode 100644 index 0000000000..dbf8186465 --- /dev/null +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java @@ -0,0 +1,84 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.specs.binding.kafka.streams.application; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + + +public class GroupIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/group"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + @Rule + public final TestRule chain = outerRule(k3po).around(timeout); + + @Test + @Specification({ + "${app}/rebalance.protocol.highlander/client", + "${app}/rebalance.protocol.highlander/server"}) + public void shouldLeaveGroupOnGroupRebalanceError() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/client.sent.write.abort.before.coordinator.response/client", + "${app}/client.sent.write.abort.before.coordinator.response/server"}) + public void shouldHandleClientSentWriteAbortBeforeCoordinatorResponse() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/leader/client", + "${app}/leader/server"}) + public void shouldBecameLeader() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/rebalance.protocol.highlander.migrate.leader/client", + "${app}/rebalance.protocol.highlander.migrate.leader/server"}) + public void shouldRebalanceProtocolHighlanderMigrateLeader() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/rebalance.protocol.unknown/client", + "${app}/rebalance.protocol.unknown/server"}) + public void shouldRejectSecondStreamOnUnknownProtocol() throws 
Exception + { + k3po.finish(); + } +} diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupIT.java new file mode 100644 index 0000000000..f9bd895508 --- /dev/null +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupIT.java @@ -0,0 +1,110 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.specs.binding.kafka.streams.network; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +public class GroupIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + @Rule + public final TestRule chain = outerRule(k3po).around(timeout); + + @Test + @Specification({ + "${net}/rebalance.protocol.highlander/client", + "${net}/rebalance.protocol.highlander/server"}) + public void shouldLeaveGroupOnGroupRebalanceError() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/client.sent.write.abort.before.coordinator.response/client", + "${net}/client.sent.write.abort.before.coordinator.response/server"}) + public void shouldHandleClientSentWriteAbortBeforeCoordinatorResponse() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/coordinator.not.available/client", + "${net}/coordinator.not.available/server"}) + public void shouldHandleCoordinatorNotAvailableError() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/coordinator.reject.invalid.consumer/client", + "${net}/coordinator.reject.invalid.consumer/server"}) + public void shouldHRejectInvalidConsumer() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/rebalance.protocol.highlander.unknown.member.id/client", + "${net}/rebalance.protocol.highlander.unknown.member.id/server"}) + public void shouldRebalanceProtocolHighlanderUnknownMemberId() throws Exception + { + k3po.finish(); + 
} + + @Test + @Specification({ + "${net}/rebalance.protocol.highlander.migrate.leader/client", + "${net}/rebalance.protocol.highlander.migrate.leader/server"}) + public void shouldRebalanceProtocolHighlanderMigrateLeader() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/rebalance.protocol.unknown/client", + "${net}/rebalance.protocol.unknown/server"}) + public void shouldRejectSecondStreamOnUnknownProtocol() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/rebalance.sync.group//client", + "${net}/rebalance.sync.group/server"}) + public void shouldHandleRebalanceSyncGroup() throws Exception + { + k3po.finish(); + } +} diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupSaslIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupSaslIT.java new file mode 100644 index 0000000000..dc2c16378c --- /dev/null +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupSaslIT.java @@ -0,0 +1,49 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.specs.binding.kafka.streams.network; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + + +public class GroupSaslIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", + "io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + @Rule + public final TestRule chain = outerRule(k3po).around(timeout); + + @Test + @Specification({ + "${net}/leader/client", + "${net}/leader/server"}) + public void shouldBecameLeader() throws Exception + { + k3po.finish(); + } +} From 750f43f8d3b17f75e716cf9d87745a1808482b33 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Mon, 31 Jul 2023 16:52:29 -0700 Subject: [PATCH 007/115] Add missing newline at EOF --- .../subscribe.one.message.user.properties.unaltered/client.rpt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt index bd5b56bd3e..289c495304 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt @@ -57,4 +57,4 @@ read [0x30 0x2c] # PUBLISH 
[0x26] # user property id [0x00 0x04] "row1" # user property key [0x00 0x01] "2" # user property value - "message" # payload \ No newline at end of file + "message" # payload From 54de30cf97b83a2406963392c81d51eb673b3210 Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Tue, 1 Aug 2023 08:51:11 -0700 Subject: [PATCH 008/115] Ignore heartbeat if the handshake request hasn't completed yet (#322) --- .../stream/KafkaClientGroupFactory.java | 15 +- .../kafka/internal/stream/ClientGroupIT.java | 10 ++ .../client.rpt | 42 ++++++ .../server.rpt | 45 ++++++ .../client.rpt | 132 ++++++++++++++++++ .../server.rpt | 125 +++++++++++++++++ .../kafka/streams/application/GroupIT.java | 9 ++ .../kafka/streams/network/GroupIT.java | 9 ++ 8 files changed, 381 insertions(+), 6 deletions(-) create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index d044e0c257..0a3867f0cc 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ 
b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -2623,14 +2623,17 @@ private void doSyncRequest( private void doHeartbeat( long traceId) { - if (heartbeatRequestId != NO_CANCEL_ID) + if (encoder != encodeJoinGroupRequest) { - signaler.cancel(heartbeatRequestId); - heartbeatRequestId = NO_CANCEL_ID; - } + if (heartbeatRequestId != NO_CANCEL_ID) + { + signaler.cancel(heartbeatRequestId); + heartbeatRequestId = NO_CANCEL_ID; + } - encoder = encodeHeartbeatRequest; - signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + encoder = encodeHeartbeatRequest; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } } private void doLeaveGroupRequest( diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java index a39c8b872a..937bbc57a7 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java @@ -130,4 +130,14 @@ public void shouldHandleRebalanceSyncGroup() throws Exception { k3po.finish(); } + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/ignore.heartbeat.before.handshake/client", + "${net}/ignore.heartbeat.before.handshake/server"}) + public void shouldIgnoreHeartbeatBeforeHandshakeComplete() throws Exception + { + k3po.finish(); + } } diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt new file mode 100644 index 
0000000000..34d5be7b16 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt @@ -0,0 +1,42 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +write advise zilla:flush + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +read zilla:data.null + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt new file mode 100644 index 0000000000..e571392836 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt @@ -0,0 +1,45 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .build() + .build()} +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt new file mode 100644 index 0000000000..8697a0555f --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt @@ -0,0 +1,132 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkConnectWindow 8192 +property instanceId ${kafka:randomBytes(42)} + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "session" coordinator key + [0x00] # coordinator group type + +read 35 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 # port + +write close +read abort + +read notify ROUTED_BROKER_SERVER + +connect await ROUTED_BROKER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 105 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 34 # size + (int:newRequestId) + 0 # throttle time + 79s # member id required + -1 # generated id + 0s # protocol name + 0s # leader id + 10s "memberId-1" # consumer member group id + 0 # members + +write 115 # size + 11s # join group + 5s # 
v5 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +read 112 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 0 # metadata + +write 101 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +read 14 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt new file mode 100644 index 0000000000..f71705ab1e --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt @@ -0,0 +1,125 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property instanceId ${kafka:randomBytes(42)} + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 35 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 4s "none" # error message none + 1 # coordinator node + 9s "localhost" # host + 9092 # port + +read closed +write aborted + +accepted + +connected + +read 105 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + + +write 34 # size + ${newRequestId} + 0 # throttle time + 79s # member id required + -1 # generated id + 0s # protocol name + 0s # leader id + 10s "memberId-1" # consumer member group id + 0 # members + +read 115 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 45000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 0 # metadata + +write 112 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 3 # 
generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 0 # metadata + +read 101 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 0 # assignment + + diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java index dbf8186465..ae9738a9b4 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java @@ -81,4 +81,13 @@ public void shouldRejectSecondStreamOnUnknownProtocol() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${app}/ignore.heartbeat.before.handshake/client", + "${app}/ignore.heartbeat.before.handshake/server"}) + public void shouldIgnoreHeartbeatBeforeHandshakeComplete() throws Exception + { + k3po.finish(); + } } diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupIT.java index f9bd895508..a9f0dbe8b5 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupIT.java @@ -107,4 +107,13 
@@ public void shouldHandleRebalanceSyncGroup() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${net}/ignore.heartbeat.before.handshake/client", + "${net}/ignore.heartbeat.before.handshake/server"}) + public void shouldIgnoreHeartbeatBeforeHandshakeComplete() throws Exception + { + k3po.finish(); + } } From 3048d722880cfe4264816aa191a5eae2af1b537c Mon Sep 17 00:00:00 2001 From: John Fallows Date: Tue, 1 Aug 2023 10:09:36 -0700 Subject: [PATCH 009/115] Support local zpmw install (#321) * Rename files to support local zpmw install * Improve log output for zpmw install * Update license checks --- cloud/docker-image/pom.xml | 3 +++ cloud/docker-image/src/main/docker/.gitignore | 3 ++- .../src/main/docker/incubator/Dockerfile | 2 +- .../incubator/{zpm.json => zpm.json.template} | 0 .../src/main/docker/release/Dockerfile | 2 +- .../release/{zpm.json => zpm.json.template} | 0 .../internal/commands/install/ZpmInstall.java | 27 ++++++++++++++----- 7 files changed, 27 insertions(+), 10 deletions(-) rename cloud/docker-image/src/main/docker/incubator/{zpm.json => zpm.json.template} (100%) rename cloud/docker-image/src/main/docker/release/{zpm.json => zpm.json.template} (100%) diff --git a/cloud/docker-image/pom.xml b/cloud/docker-image/pom.xml index 67f2e68fce..5693843aae 100644 --- a/cloud/docker-image/pom.xml +++ b/cloud/docker-image/pom.xml @@ -202,6 +202,9 @@ src/main/docker/*/zpmw src/main/docker/*/zilla.properties + src/main/docker/*/zpm.json.template + src/main/docker/*/zpm.json + src/main/docker/*/.zpm/** diff --git a/cloud/docker-image/src/main/docker/.gitignore b/cloud/docker-image/src/main/docker/.gitignore index e52b514a42..ab5fa6ba9e 100644 --- a/cloud/docker-image/src/main/docker/.gitignore +++ b/cloud/docker-image/src/main/docker/.gitignore @@ -1,2 +1,3 @@ zpmw - +zpm.json +zpm-lock.json diff --git a/cloud/docker-image/src/main/docker/incubator/Dockerfile b/cloud/docker-image/src/main/docker/incubator/Dockerfile index 6babc87b1e..7a0f47e9c0 
100644 --- a/cloud/docker-image/src/main/docker/incubator/Dockerfile +++ b/cloud/docker-image/src/main/docker/incubator/Dockerfile @@ -18,7 +18,7 @@ FROM eclipse-temurin:17-alpine AS build COPY maven /root/.m2/repository COPY zpmw zpmw -COPY zpm.json zpm.json.template +COPY zpm.json.template zpm.json.template RUN apk add --no-cache gettext RUN cat zpm.json.template | env VERSION=${project.version} envsubst > zpm.json diff --git a/cloud/docker-image/src/main/docker/incubator/zpm.json b/cloud/docker-image/src/main/docker/incubator/zpm.json.template similarity index 100% rename from cloud/docker-image/src/main/docker/incubator/zpm.json rename to cloud/docker-image/src/main/docker/incubator/zpm.json.template diff --git a/cloud/docker-image/src/main/docker/release/Dockerfile b/cloud/docker-image/src/main/docker/release/Dockerfile index 6babc87b1e..7a0f47e9c0 100644 --- a/cloud/docker-image/src/main/docker/release/Dockerfile +++ b/cloud/docker-image/src/main/docker/release/Dockerfile @@ -18,7 +18,7 @@ FROM eclipse-temurin:17-alpine AS build COPY maven /root/.m2/repository COPY zpmw zpmw -COPY zpm.json zpm.json.template +COPY zpm.json.template zpm.json.template RUN apk add --no-cache gettext RUN cat zpm.json.template | env VERSION=${project.version} envsubst > zpm.json diff --git a/cloud/docker-image/src/main/docker/release/zpm.json b/cloud/docker-image/src/main/docker/release/zpm.json.template similarity index 100% rename from cloud/docker-image/src/main/docker/release/zpm.json rename to cloud/docker-image/src/main/docker/release/zpm.json.template diff --git a/manager/src/main/java/io/aklivity/zilla/manager/internal/commands/install/ZpmInstall.java b/manager/src/main/java/io/aklivity/zilla/manager/internal/commands/install/ZpmInstall.java index 8122cc14dc..eacaadf4c3 100644 --- a/manager/src/main/java/io/aklivity/zilla/manager/internal/commands/install/ZpmInstall.java +++ b/manager/src/main/java/io/aklivity/zilla/manager/internal/commands/install/ZpmInstall.java @@ -16,6 
+16,7 @@ package io.aklivity.zilla.manager.internal.commands.install; import static io.aklivity.zilla.manager.internal.settings.ZpmSecrets.decryptSecret; +import static java.io.OutputStream.nullOutputStream; import static java.nio.charset.StandardCharsets.UTF_8; import static java.nio.file.Files.createDirectories; import static java.nio.file.Files.getLastModifiedTime; @@ -34,6 +35,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.io.PrintStream; import java.lang.module.ModuleDescriptor; import java.lang.module.ModuleFinder; import java.lang.module.ModuleReference; @@ -178,13 +180,13 @@ public void invoke() ZpmModule delegate = new ZpmModule(); Collection modules = discoverModules(artifacts); migrateUnnamed(modules, delegate); - generateSystemOnlyAutomatic(modules); + generateSystemOnlyAutomatic(logger, modules); delegateAutomatic(modules, delegate); copyNonDelegating(modules); if (!delegate.paths.isEmpty()) { - generateDelegate(delegate); + generateDelegate(logger, delegate); generateDelegating(modules); } @@ -396,6 +398,7 @@ private void delegateModule( } private void generateSystemOnlyAutomatic( + MessageLogger logger, Collection modules) throws IOException { Map promotions = new IdentityHashMap<>(); @@ -415,21 +418,24 @@ private void generateSystemOnlyAutomatic( Path artifactPath = module.paths.iterator().next(); ToolProvider jdeps = ToolProvider.findFirst("jdeps").get(); + PrintStream nullOutput = new PrintStream(nullOutputStream()); jdeps.run( - System.out, - System.err, + nullOutput, + nullOutput, "--generate-open-module", generatedModulesDir.toString(), artifactPath.toString()); Path generatedModuleInfo = generatedModuleDir.resolve(MODULE_INFO_JAVA_FILENAME); if (Files.exists(generatedModuleInfo)) { + logger.info(String.format("Generated module info for system-only automatic module: %s", module.name)); + expandJar(generatedModuleDir, artifactPath); ToolProvider javac = 
ToolProvider.findFirst("javac").get(); javac.run( - System.out, - System.err, + nullOutput, + nullOutput, "-d", generatedModuleDir.toString(), generatedModuleInfo.toString()); @@ -478,6 +484,7 @@ private void copyNonDelegating( } private void generateDelegate( + MessageLogger logger, ZpmModule delegate) throws IOException { Path generatedModulesDir = generatedDir.resolve("modules"); @@ -562,7 +569,13 @@ else if (entryNames.add(entryName)) jdepsArgs.toArray(String[]::new)); Path generatedModuleInfo = generatedDelegateDir.resolve(MODULE_INFO_JAVA_FILENAME); - assert Files.exists(generatedModuleInfo); + + if (!Files.exists(generatedModuleInfo)) + { + throw new IOException("Failed to generate module info for delegate module"); + } + + logger.info(String.format("Generated module info for delegate module\n")); String moduleInfoContents = Files.readString(generatedModuleInfo); Pattern pattern = Pattern.compile("(?:provides\\s+)([^\\s]+)(?:\\s+with)"); From a7927357b1d987f535e51ef304924ab22592c0c1 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Tue, 1 Aug 2023 10:54:18 -0700 Subject: [PATCH 010/115] Add README for running Zilla locally --- .../src/main/docker/incubator/README.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 cloud/docker-image/src/main/docker/incubator/README.md diff --git a/cloud/docker-image/src/main/docker/incubator/README.md b/cloud/docker-image/src/main/docker/incubator/README.md new file mode 100644 index 0000000000..5dc79cfcf6 --- /dev/null +++ b/cloud/docker-image/src/main/docker/incubator/README.md @@ -0,0 +1,13 @@ +### Running locally + +```bash +cat zpm.json.template | env VERSION=develop-SNAPSHOT envsubst > zpm.json +``` + +```bash +./zpmw install --debug --exclude-remote-repositories +``` + +``` +./zilla start --config +``` From b3643b95df6c2edca10960ad3a174ed02d995c99 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 3 Aug 2023 16:49:33 -0700 Subject: [PATCH 011/115] Support zilla.yaml config reader and writer 
(#323) --- cloud/docker-image/pom.xml | 5 + .../config/AmqpConditionConfig.java | 2 +- .../config/AmqpConditionConfigAdapter.java | 1 + .../internal/config/AmqpConditionMatcher.java | 1 + .../amqp/internal/config/AmqpRouteConfig.java | 1 + .../src/main/moditect/module-info.java | 2 + .../AmqpConditionConfigAdapterTest.java | 2 + .../config/MqttKafkaConditionConfig.java | 2 +- .../MqttKafkaConditionConfigAdapter.java | 1 + .../src/main/moditect/module-info.java | 2 + .../MqttKafkaConditionConfigAdapterTest.java | 2 + .../config/MqttConditionConfig.java | 2 +- .../config/MqttConditionConfigAdapter.java | 1 + .../internal/config/MqttConditionMatcher.java | 1 + .../mqtt/internal/config/MqttRouteConfig.java | 1 + .../src/main/moditect/module-info.java | 2 + .../MqttConditionConfigAdapterTest.java | 2 + incubator/command-log/NOTICE | 5 + .../config/OtlpEndpointConfig.java | 2 +- .../config/OtlpOptionsConfig.java | 2 +- .../config/OtlpOverridesConfig.java | 2 +- .../otlp/internal/OltpExporterHandler.java | 4 +- .../internal/config/OtlpEndpointAdapter.java | 3 + .../internal/config/OtlpExporterConfig.java | 3 +- .../config/OtlpOptionsConfigAdapter.java | 2 + .../internal/config/OtlpOverridesAdapter.java | 2 + .../internal/config/OtlpSignalsAdapter.java | 4 +- .../src/main/moditect/module-info.java | 2 + .../config/OltpOptionsConfigAdapterTest.java | 6 +- .../config/OtlpExporterConfigTest.java | 5 +- pom.xml | 1 + .../config/FileSystemOptionsConfig.java | 2 +- .../config/FileSystemSymbolicLinksConfig.java | 2 +- .../config/FileSystemBindingConfig.java | 1 + .../FileSystemOptionsConfigAdapter.java | 4 +- .../stream/FileSystemServerFactory.java | 4 +- .../src/main/moditect/module-info.java | 2 + .../FileSystemOptionsConfigAdapterTest.java | 3 + .../config/GrpcKafkaConditionConfig.java | 6 +- .../config/GrpcKafkaCorrelationConfig.java | 2 +- .../config/GrpcKafkaIdempotencyConfig.java | 2 +- .../GrpcKafkaMetadataValueConfig.java} | 6 +- .../config/GrpcKafkaOptionsConfig.java | 
2 +- .../config/GrpcKafkaReliabilityConfig.java | 2 +- .../config/GrpcKafkaBindingConfig.java | 1 + .../GrpcKafkaConditionConfigAdapter.java | 7 +- .../config/GrpcKafkaConditionMatcher.java | 8 +- .../config/GrpcKafkaOptionsConfigAdapter.java | 4 + .../internal/config/GrpcKafkaRouteConfig.java | 2 + .../config/GrpcKafkaWithProduceResult.java | 1 + .../config/GrpcKafkaWithResolver.java | 1 + .../src/main/moditect/module-info.java | 2 + .../GrpcKafkaConditionConfigAdapterTest.java | 6 +- .../GrpcKafkaOptionsConfigAdapterTest.java | 8 +- .../config/GrpcConditionConfig.java | 6 +- .../GrpcMetadataValueConfig.java} | 6 +- .../config/GrpcMethodConfig.java | 2 +- .../config/GrpcOptionsConfig.java | 2 +- .../config/GrpcProtobufConfig.java | 2 +- .../config/GrpcServiceConfig.java | 2 +- .../internal/config/GrpcBindingConfig.java | 6 +- .../config/GrpcConditionConfigAdapter.java | 6 +- .../internal/config/GrpcConditionMatcher.java | 8 +- .../config/GrpcOptionsConfigAdapter.java | 4 +- .../grpc/internal/config/GrpcRouteConfig.java | 1 + .../config/GrpcServiceDefinitionListener.java | 2 + .../src/main/moditect/module-info.java | 2 + .../config/GrpcOptionsConfigAdapterTest.java | 4 + .../config/HttpFileSystemConditionConfig.java | 2 +- .../HttpFileSystemConditionConfigAdapter.java | 1 + .../HttpFileSystemConditionMatcher.java | 2 + .../config/HttpFileSystemRouteConfig.java | 1 + .../src/main/moditect/module-info.java | 2 + ...pFileSystemConditionConfigAdapterTest.java | 2 + .../config/HttpKafkaConditionConfig.java | 2 +- .../config/HttpKafkaCorrelationConfig.java | 2 +- .../config/HttpKafkaIdempotencyConfig.java | 2 +- .../config/HttpKafkaOptionsConfig.java | 2 +- .../config/HttpKafkaBindingConfig.java | 1 + .../HttpKafkaConditionConfigAdapter.java | 1 + .../config/HttpKafkaConditionMatcher.java | 2 + .../config/HttpKafkaOptionsConfigAdapter.java | 3 + .../internal/config/HttpKafkaRouteConfig.java | 2 + .../config/HttpKafkaWithProduceResult.java | 1 + 
.../config/HttpKafkaWithResolver.java | 1 + .../src/main/moditect/module-info.java | 2 + .../HttpKafkaConditionConfigAdapterTest.java | 2 + .../HttpKafkaOptionsConfigAdapterTest.java | 3 + .../config/HttpAccessControlConfig.java | 6 +- .../config/HttpAuthorizationConfig.java | 2 +- .../config/HttpConditionConfig.java | 2 +- .../config/HttpOptionsConfig.java | 2 +- .../{internal => }/config/HttpVersion.java | 2 +- .../internal/config/HttpBindingConfig.java | 11 +- .../config/HttpConditionConfigAdapter.java | 1 + .../internal/config/HttpConditionMatcher.java | 2 + .../config/HttpOptionsConfigAdapter.java | 16 +- .../http/internal/config/HttpRouteConfig.java | 1 + .../internal/stream/HttpClientFactory.java | 6 +- .../internal/stream/HttpServerFactory.java | 8 +- .../src/main/moditect/module-info.java | 2 + .../HttpConditionConfigAdapterTest.java | 2 + .../config/HttpOptionsConfigAdapterTest.java | 14 +- .../config/KafkaGrpcConditionConfig.java | 2 +- .../config/KafkaGrpcCorrelationConfig.java | 2 +- .../config/KafkaGrpcIdempotencyConfig.java | 2 +- .../config/KafkaGrpcOptionsConfig.java | 2 +- .../config/KafkaGrpcBindingConfig.java | 1 + .../KafkaGrpcConditionConfigAdapter.java | 1 + .../config/KafkaGrpcConditionResolver.java | 2 + .../config/KafkaGrpcConditionResult.java | 1 + .../config/KafkaGrpcOptionsConfigAdapter.java | 3 + .../internal/config/KafkaGrpcRouteConfig.java | 2 + .../stream/KafkaGrpcFetchHeaderHelper.java | 2 +- .../src/main/moditect/module-info.java | 2 + .../KafkaGrpcConditionConfigAdapterTest.java | 1 + .../KafkaGrpcOptionsConfigAdapterTest.java | 3 + .../config/KafkaConditionConfig.java | 2 +- .../config/KafkaOptionsConfig.java | 2 +- .../config/KafkaSaslConfig.java | 2 +- .../config/KafkaTopicConfig.java | 2 +- .../internal/config/KafkaBindingConfig.java | 3 + .../config/KafkaConditionConfigAdapter.java | 1 + .../config/KafkaConditionMatcher.java | 2 + .../config/KafkaOptionsConfigAdapter.java | 3 + .../internal/config/KafkaRouteConfig.java | 1 + 
...echanism.java => KafkaScramMechanism.java} | 12 +- .../config/KafkaTopicConfigAdapter.java | 1 + .../KafkaCacheServerBootstrapFactory.java | 2 +- .../stream/KafkaCacheServerFetchFactory.java | 2 +- .../stream/KafkaClientDescribeFactory.java | 2 +- .../stream/KafkaClientFetchFactory.java | 2 +- .../stream/KafkaClientGroupFactory.java | 2 +- .../stream/KafkaClientMetaFactory.java | 2 +- .../stream/KafkaClientProduceFactory.java | 2 +- .../stream/KafkaClientSaslHandshaker.java | 8 +- .../src/main/moditect/module-info.java | 2 + .../KafkaConditionConfigAdapterTest.java | 2 + .../config/KafkaOptionsConfigAdapterTest.java | 4 + .../config/ProxyAddressConfig.java | 2 +- .../config/ProxyConditionConfig.java | 2 +- .../config/ProxyInfoConfig.java | 4 +- .../config/ProxyOptionsConfig.java | 2 +- .../config/ProxyAddressConfigAdapter.java | 2 + .../internal/config/ProxyBindingConfig.java | 1 + .../config/ProxyConditionConfigAdapter.java | 3 + .../config/ProxyConditionMatcher.java | 3 + .../config/ProxyInfoConfigAdapter.java | 2 + .../config/ProxyOptionsConfigAdapter.java | 1 + .../internal/config/ProxyRouteConfig.java | 1 + .../src/main/moditect/module-info.java | 2 + .../ProxyConditionConfigAdapterTest.java | 4 + .../internal/config/ProxyMatcherTest.java | 3 + .../config/ProxyOptionsConfigAdapterTest.java | 2 + .../config/SseKafkaConditionConfig.java | 2 +- .../SseKafkaConditionConfigAdapter.java | 1 + .../config/SseKafkaConditionMatcher.java | 2 + .../internal/config/SseKafkaRouteConfig.java | 1 + .../src/main/moditect/module-info.java | 2 + .../SseKafkaConditionConfigAdapterTest.java | 2 + .../config/SseConditionConfig.java | 2 +- .../config/SseOptionsConfig.java | 11 +- .../sse/internal/config/SseBindingConfig.java | 4 +- .../config/SseConditionConfigAdapter.java | 1 + .../internal/config/SseConditionMatcher.java | 2 + .../config/SseOptionsConfigAdapter.java | 6 +- .../sse/internal/config/SseRouteConfig.java | 1 + .../src/main/moditect/module-info.java | 2 + 
.../config/SseConditionConfigAdapterTest.java | 2 + .../config/SseOptionsConfigAdapterTest.java | 2 + runtime/binding-tcp/NOTICE | 6 - runtime/binding-tcp/pom.xml | 1 + .../config/TcpConditionConfig.java | 2 +- .../config/TcpOptionsConfig.java | 2 +- .../tcp/internal/config/TcpBindingConfig.java | 1 + .../config/TcpConditionConfigAdapter.java | 1 + .../internal/config/TcpConditionMatcher.java | 1 + .../config/TcpOptionsConfigAdapter.java | 1 + .../tcp/internal/config/TcpRouteConfig.java | 1 + .../config/TcpServerBindingConfig.java | 2 + .../tcp/internal/stream/TcpClientFactory.java | 2 +- .../tcp/internal/stream/TcpClientRouter.java | 2 +- .../tcp/internal/stream/TcpServerFactory.java | 2 +- .../src/main/moditect/module-info.java | 2 + .../config/TcpConditionConfigAdapterTest.java | 2 + .../config/TcpOptionsConfigAdapterTest.java | 2 + .../config/TlsConditionConfig.java | 2 +- .../tls/{internal => }/config/TlsMutual.java | 2 +- .../config/TlsOptionsConfig.java | 2 +- .../tls/internal/config/TlsBindingConfig.java | 2 + .../config/TlsConditionConfigAdapter.java | 1 + .../internal/config/TlsConditionMatcher.java | 2 + .../config/TlsOptionsConfigAdapter.java | 4 +- .../tls/internal/config/TlsRouteConfig.java | 1 + .../src/main/moditect/module-info.java | 2 + .../config/TlsConditionConfigAdapterTest.java | 2 + .../config/TlsOptionsConfigAdapterTest.java | 4 +- .../config/WsConditionConfig.java | 2 +- .../config/WsOptionsConfig.java | 2 +- .../ws/internal/config/WsBindingConfig.java | 1 + .../config/WsConditionConfigAdapter.java | 1 + .../internal/config/WsConditionMatcher.java | 2 + .../config/WsOptionsConfigAdapter.java | 1 + .../ws/internal/config/WsRouteConfig.java | 1 + .../src/main/moditect/module-info.java | 2 + .../config/WsConditionConfigAdapterTest.java | 2 + .../config/WsOptionsConfigAdapterTest.java | 2 + runtime/engine/NOTICE | 5 + runtime/engine/pom.xml | 5 + .../engine/config/ConfigException.java | 27 ++++ .../runtime/engine/config/ConfigReader.java | 150 
++++++++++++++++++ .../runtime/engine/config/ConfigWriter.java | 102 ++++++++++++ .../engine/internal/config/KindAdapter.java | 3 - .../internal/config/NamspaceRefAdapter.java | 3 - .../engine/internal/config/RouteAdapter.java | 5 +- .../internal/config/TelemetryRefAdapter.java | 8 - .../schema}/SchemaDecorator.java | 2 +- .../schema}/UniquePropertyKeysSchema.java | 2 +- .../registry/ConfigurationManager.java | 98 ++---------- .../engine/src/main/moditect/module-info.java | 1 + .../engine/config/ConfigWriterTest.java | 58 +++++++ .../{internal => }/config/JwtKeyConfig.java | 2 +- .../config/JwtKeySetConfig.java | 2 +- .../config/JwtOptionsConfig.java | 2 +- .../guard/jwt/internal/JwtGuardContext.java | 2 +- .../guard/jwt/internal/JwtGuardHandler.java | 6 +- .../internal/config/JwtKeyConfigAdapter.java | 2 + .../config/JwtKeySetConfigAdapter.java | 3 + .../config/JwtOptionsConfigAdapter.java | 2 + .../src/main/moditect/module-info.java | 2 + .../jwt/internal/JwtGuardHandlerTest.java | 2 +- .../guard/jwt/internal/JwtGuardTest.java | 2 +- .../config/JwtKeySetConfigAdapterTest.java | 2 + .../config/JwtOptionsConfigAdapterTest.java | 3 + .../jwt/internal/keys/JwtKeyConfigs.java | 2 +- .../config/FileSystemOptionsConfig.java | 14 +- .../FileSystemStoreConfig.java} | 6 +- .../internal/FileSystemContext.java | 2 +- .../internal/FileSystemVaultHandler.java | 12 +- .../FileSystemOptionsConfigAdapter.java | 10 +- ...java => FileSystemStoreConfigAdapter.java} | 10 +- .../src/main/moditect/module-info.java | 2 + .../internal/FileSystemVaultTest.java | 12 +- .../FileSystemOptionsConfigAdapterTest.java | 7 +- 244 files changed, 824 insertions(+), 305 deletions(-) rename incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/{internal => }/config/AmqpConditionConfig.java (94%) rename incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/{internal => }/config/MqttKafkaConditionConfig.java (92%) rename 
incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/{internal => }/config/MqttConditionConfig.java (94%) rename incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/{internal => }/config/OtlpEndpointConfig.java (94%) rename incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/{internal => }/config/OtlpOptionsConfig.java (94%) rename incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/{internal => }/config/OtlpOverridesConfig.java (92%) rename runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/{internal => }/config/FileSystemOptionsConfig.java (95%) rename runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/{internal => }/config/FileSystemSymbolicLinksConfig.java (90%) rename runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/{internal => }/config/GrpcKafkaConditionConfig.java (84%) rename runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/{internal => }/config/GrpcKafkaCorrelationConfig.java (94%) rename runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/{internal => }/config/GrpcKafkaIdempotencyConfig.java (92%) rename runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/{internal/config/GrpcKafkaMetadataValue.java => config/GrpcKafkaMetadataValueConfig.java} (86%) rename runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/{internal => }/config/GrpcKafkaOptionsConfig.java (94%) rename runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/{internal => }/config/GrpcKafkaReliabilityConfig.java (93%) rename runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/{internal => }/config/GrpcConditionConfig.java (84%) rename 
runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/{internal/config/GrpcMetadataValue.java => config/GrpcMetadataValueConfig.java} (87%) rename runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/{internal => }/config/GrpcMethodConfig.java (92%) rename runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/{internal => }/config/GrpcOptionsConfig.java (93%) rename runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/{internal => }/config/GrpcProtobufConfig.java (93%) rename runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/{internal => }/config/GrpcServiceConfig.java (93%) rename runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/{internal => }/config/HttpFileSystemConditionConfig.java (92%) rename runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/{internal => }/config/HttpKafkaConditionConfig.java (93%) rename runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/{internal => }/config/HttpKafkaCorrelationConfig.java (93%) rename runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/{internal => }/config/HttpKafkaIdempotencyConfig.java (92%) rename runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/{internal => }/config/HttpKafkaOptionsConfig.java (93%) rename runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/{internal => }/config/HttpAccessControlConfig.java (97%) rename runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/{internal => }/config/HttpAuthorizationConfig.java (97%) rename runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/{internal => }/config/HttpConditionConfig.java (93%) rename runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/{internal => }/config/HttpOptionsConfig.java 
(95%) rename runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/{internal => }/config/HttpVersion.java (94%) rename runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/{internal => }/config/KafkaGrpcConditionConfig.java (96%) rename runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/{internal => }/config/KafkaGrpcCorrelationConfig.java (94%) rename runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/{internal => }/config/KafkaGrpcIdempotencyConfig.java (92%) rename runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/{internal => }/config/KafkaGrpcOptionsConfig.java (94%) rename runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/{internal => }/config/KafkaConditionConfig.java (93%) rename runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/{internal => }/config/KafkaOptionsConfig.java (94%) rename runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/{internal => }/config/KafkaSaslConfig.java (94%) rename runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/{internal => }/config/KafkaTopicConfig.java (96%) rename runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/{ScramMechanism.java => KafkaScramMechanism.java} (84%) rename runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/{internal => }/config/ProxyAddressConfig.java (93%) rename runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/{internal => }/config/ProxyConditionConfig.java (95%) rename runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/{internal => }/config/ProxyInfoConfig.java (89%) rename runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/{internal => }/config/ProxyOptionsConfig.java (92%) rename 
runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/{internal => }/config/SseKafkaConditionConfig.java (92%) rename runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/{internal => }/config/SseConditionConfig.java (93%) rename runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/{internal => }/config/SseOptionsConfig.java (80%) rename runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/{internal => }/config/TcpConditionConfig.java (94%) rename runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/{internal => }/config/TcpOptionsConfig.java (95%) rename runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/{internal => }/config/TlsConditionConfig.java (93%) rename runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/{internal => }/config/TlsMutual.java (91%) rename runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/{internal => }/config/TlsOptionsConfig.java (96%) rename runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/{internal => }/config/WsConditionConfig.java (94%) rename runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/{internal => }/config/WsOptionsConfig.java (94%) create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigException.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigReader.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigWriter.java rename runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/{registry/json => config/schema}/SchemaDecorator.java (98%) rename runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/{registry/json => config/schema}/UniquePropertyKeysSchema.java (98%) create mode 100644 
runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java rename runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/{internal => }/config/JwtKeyConfig.java (95%) rename runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/{internal => }/config/JwtKeySetConfig.java (92%) rename runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/{internal => }/config/JwtOptionsConfig.java (96%) rename runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/{internal => }/config/FileSystemOptionsConfig.java (74%) rename runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/{internal/config/FileSystemStore.java => config/FileSystemStoreConfig.java} (87%) rename runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/{FileSystemStoreAdapter.java => FileSystemStoreConfigAdapter.java} (83%) diff --git a/cloud/docker-image/pom.xml b/cloud/docker-image/pom.xml index 5693843aae..3d0fb42c96 100644 --- a/cloud/docker-image/pom.xml +++ b/cloud/docker-image/pom.xml @@ -201,6 +201,7 @@ src/main/docker/*/zpmw + src/main/docker/*/zilla src/main/docker/*/zilla.properties src/main/docker/*/zpm.json.template src/main/docker/*/zpm.json @@ -288,6 +289,10 @@ org/slf4j/** org/antlr/** org/sonatype/oss/** + com/fasterxml/oss-parent/** + com/fasterxml/jackson/** + org/yaml/snakeyaml/** + org/junit/** diff --git a/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionConfig.java b/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/config/AmqpConditionConfig.java similarity index 94% rename from incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionConfig.java rename to incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/config/AmqpConditionConfig.java index 22ed7f7782..4571980517 
100644 --- a/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionConfig.java +++ b/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/config/AmqpConditionConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.binding.amqp.internal.config; +package io.aklivity.zilla.runtime.binding.amqp.config; import io.aklivity.zilla.runtime.binding.amqp.internal.types.AmqpCapabilities; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionConfigAdapter.java b/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionConfigAdapter.java index f497ee70ee..ffb70b35e3 100644 --- a/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionConfigAdapter.java +++ b/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionConfigAdapter.java @@ -20,6 +20,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.amqp.config.AmqpConditionConfig; import io.aklivity.zilla.runtime.binding.amqp.internal.AmqpBinding; import io.aklivity.zilla.runtime.binding.amqp.internal.types.AmqpCapabilities; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionMatcher.java b/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionMatcher.java index 46253cd837..8bc02d652c 100644 --- a/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionMatcher.java +++ 
b/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionMatcher.java @@ -18,6 +18,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.aklivity.zilla.runtime.binding.amqp.config.AmqpConditionConfig; import io.aklivity.zilla.runtime.binding.amqp.internal.types.AmqpCapabilities; public final class AmqpConditionMatcher diff --git a/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpRouteConfig.java b/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpRouteConfig.java index b64396f68c..017bd58877 100644 --- a/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpRouteConfig.java +++ b/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpRouteConfig.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.function.LongPredicate; +import io.aklivity.zilla.runtime.binding.amqp.config.AmqpConditionConfig; import io.aklivity.zilla.runtime.binding.amqp.internal.types.AmqpCapabilities; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; diff --git a/incubator/binding-amqp/src/main/moditect/module-info.java b/incubator/binding-amqp/src/main/moditect/module-info.java index a4f7f980cb..b89cf580bf 100644 --- a/incubator/binding-amqp/src/main/moditect/module-info.java +++ b/incubator/binding-amqp/src/main/moditect/module-info.java @@ -17,6 +17,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.amqp.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.amqp.internal.AmqpBindingFactorySpi; diff --git a/incubator/binding-amqp/src/test/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionConfigAdapterTest.java 
b/incubator/binding-amqp/src/test/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionConfigAdapterTest.java index 60884ecbb2..90b509f550 100644 --- a/incubator/binding-amqp/src/test/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionConfigAdapterTest.java +++ b/incubator/binding-amqp/src/test/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpConditionConfigAdapterTest.java @@ -29,6 +29,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.amqp.config.AmqpConditionConfig; + public class AmqpConditionConfigAdapterTest { private Jsonb jsonb; diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfig.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/config/MqttKafkaConditionConfig.java similarity index 92% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfig.java rename to incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/config/MqttKafkaConditionConfig.java index 9ba2880b17..8df04215ba 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfig.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/config/MqttKafkaConditionConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.mqtt.kafka.config; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapter.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapter.java index 22fb457072..ca3ed0595c 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapter.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapter.java @@ -19,6 +19,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.config.MqttKafkaConditionConfig; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; diff --git a/incubator/binding-mqtt-kafka/src/main/moditect/module-info.java b/incubator/binding-mqtt-kafka/src/main/moditect/module-info.java index 2fe3bda1fa..dba9dc0234 100644 --- a/incubator/binding-mqtt-kafka/src/main/moditect/module-info.java +++ b/incubator/binding-mqtt-kafka/src/main/moditect/module-info.java @@ -16,6 +16,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.mqtt.kafka.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaBindingFactorySpi; diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapterTest.java 
b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapterTest.java index 00c4d8b488..6c4eb05319 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapterTest.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapterTest.java @@ -26,6 +26,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.config.MqttKafkaConditionConfig; + public class MqttKafkaConditionConfigAdapterTest { private Jsonb jsonb; diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java similarity index 94% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfig.java rename to incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java index 0492fd2190..82d6dedb5f 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfig.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.mqtt.internal.config; +package io.aklivity.zilla.runtime.binding.mqtt.config; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java index 32468e6666..caa1b367e9 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java @@ -20,6 +20,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfig; import io.aklivity.zilla.runtime.binding.mqtt.internal.MqttBinding; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java index e5a4575f15..c1de099fea 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java @@ -18,6 +18,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfig; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; public final 
class MqttConditionMatcher diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java index 6e9c214e20..be2e40fda1 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.function.LongPredicate; +import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfig; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; diff --git a/incubator/binding-mqtt/src/main/moditect/module-info.java b/incubator/binding-mqtt/src/main/moditect/module-info.java index 2a88753954..bed948cf0f 100644 --- a/incubator/binding-mqtt/src/main/moditect/module-info.java +++ b/incubator/binding-mqtt/src/main/moditect/module-info.java @@ -17,6 +17,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.mqtt.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.mqtt.internal.MqttBindingFactorySpi; diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java index ac0a56e001..aa51fec231 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java +++ 
b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java @@ -29,6 +29,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfig; + public class MqttConditionConfigAdapterTest { private Jsonb jsonb; diff --git a/incubator/command-log/NOTICE b/incubator/command-log/NOTICE index 825692ba5b..e632206ebd 100644 --- a/incubator/command-log/NOTICE +++ b/incubator/command-log/NOTICE @@ -15,9 +15,14 @@ This project includes: agrona under The Apache License, Version 2.0 Apache Commons CLI under Apache License, Version 2.0 ICU4J under Unicode/ICU License + Jackson-annotations under The Apache Software License, Version 2.0 + Jackson-core under The Apache Software License, Version 2.0 + jackson-databind under The Apache Software License, Version 2.0 + Jackson-dataformat-YAML under The Apache Software License, Version 2.0 Jakarta JSON Processing API under Eclipse Public License 2.0 or GNU General Public License, version 2 with the GNU Classpath Exception JSON-B API under Eclipse Public License 2.0 or GNU General Public License, version 2 with the GNU Classpath Exception org.leadpony.justify under The Apache Software License, Version 2.0 + SnakeYAML under Apache License, Version 2.0 zilla::runtime::engine under The Apache Software License, Version 2.0 diff --git a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpEndpointConfig.java b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpEndpointConfig.java similarity index 94% rename from incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpEndpointConfig.java rename to incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpEndpointConfig.java index f6844e3006..6b7eea006b 100644 --- 
a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpEndpointConfig.java +++ b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpEndpointConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.exporter.otlp.internal.config; +package io.aklivity.zilla.runtime.exporter.otlp.config; import java.net.URI; diff --git a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOptionsConfig.java b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpOptionsConfig.java similarity index 94% rename from incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOptionsConfig.java rename to incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpOptionsConfig.java index 119a961a40..d5bb3f24c5 100644 --- a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOptionsConfig.java +++ b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpOptionsConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.exporter.otlp.internal.config; +package io.aklivity.zilla.runtime.exporter.otlp.config; import java.util.Set; diff --git a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOverridesConfig.java b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpOverridesConfig.java similarity index 92% rename from incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOverridesConfig.java rename to incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpOverridesConfig.java index 95c8f81384..53a5b5596e 100644 --- a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOverridesConfig.java +++ b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpOverridesConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.exporter.otlp.internal.config; +package io.aklivity.zilla.runtime.exporter.otlp.config; import java.net.URI; diff --git a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/OltpExporterHandler.java b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/OltpExporterHandler.java index 194e71208d..1fff2ab6bd 100644 --- a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/OltpExporterHandler.java +++ b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/OltpExporterHandler.java @@ -14,7 +14,7 @@ */ package io.aklivity.zilla.runtime.exporter.otlp.internal; -import static io.aklivity.zilla.runtime.exporter.otlp.internal.config.OtlpOptionsConfig.OtlpSignalsConfig.METRICS; +import static io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOptionsConfig.OtlpSignalsConfig.METRICS; import java.net.HttpURLConnection; import java.net.URI; @@ -34,8 +34,8 @@ import io.aklivity.zilla.runtime.engine.exporter.ExporterHandler; import io.aklivity.zilla.runtime.engine.metrics.Collector; import io.aklivity.zilla.runtime.engine.metrics.reader.MetricsReader; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOptionsConfig; import io.aklivity.zilla.runtime.exporter.otlp.internal.config.OtlpExporterConfig; -import io.aklivity.zilla.runtime.exporter.otlp.internal.config.OtlpOptionsConfig; import io.aklivity.zilla.runtime.exporter.otlp.internal.serializer.OtlpMetricsSerializer; public class OltpExporterHandler implements ExporterHandler diff --git a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpEndpointAdapter.java b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpEndpointAdapter.java index 25bf80fcea..1ba739dffc 100644 --- 
a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpEndpointAdapter.java +++ b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpEndpointAdapter.java @@ -21,6 +21,9 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpEndpointConfig; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOverridesConfig; + public class OtlpEndpointAdapter implements JsonbAdapter { private static final String PROTOCOL_NAME = "protocol"; diff --git a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpExporterConfig.java b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpExporterConfig.java index a7cac677c2..b6b15c2cf5 100644 --- a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpExporterConfig.java +++ b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpExporterConfig.java @@ -14,13 +14,14 @@ */ package io.aklivity.zilla.runtime.exporter.otlp.internal.config; -import static io.aklivity.zilla.runtime.exporter.otlp.internal.config.OtlpOptionsConfig.OtlpSignalsConfig.METRICS; +import static io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOptionsConfig.OtlpSignalsConfig.METRICS; import java.net.URI; import java.time.Duration; import java.util.Set; import io.aklivity.zilla.runtime.engine.config.ExporterConfig; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOptionsConfig; public class OtlpExporterConfig { diff --git a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOptionsConfigAdapter.java b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOptionsConfigAdapter.java index ebf61f6737..d49b32708d 
100644 --- a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOptionsConfigAdapter.java +++ b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOptionsConfigAdapter.java @@ -25,6 +25,8 @@ import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpEndpointConfig; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOptionsConfig; import io.aklivity.zilla.runtime.exporter.otlp.internal.OtlpExporter; public class OtlpOptionsConfigAdapter implements OptionsConfigAdapterSpi, JsonbAdapter diff --git a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOverridesAdapter.java b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOverridesAdapter.java index 708687ac2e..5957e244b8 100644 --- a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOverridesAdapter.java +++ b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpOverridesAdapter.java @@ -21,6 +21,8 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOverridesConfig; + public class OtlpOverridesAdapter implements JsonbAdapter { private static final String METRICS_NAME = "metrics"; diff --git a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpSignalsAdapter.java b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpSignalsAdapter.java index 2e799f6eff..7e8f90a052 100644 --- a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpSignalsAdapter.java +++ 
b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpSignalsAdapter.java @@ -14,7 +14,7 @@ */ package io.aklivity.zilla.runtime.exporter.otlp.internal.config; -import static io.aklivity.zilla.runtime.exporter.otlp.internal.config.OtlpOptionsConfig.OtlpSignalsConfig.METRICS; +import static io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOptionsConfig.OtlpSignalsConfig.METRICS; import java.util.Set; import java.util.TreeSet; @@ -24,6 +24,8 @@ import jakarta.json.JsonArrayBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOptionsConfig; + public class OtlpSignalsAdapter implements JsonbAdapter, JsonArray> { private static final String METRICS_NAME = "metrics"; diff --git a/incubator/exporter-otlp/src/main/moditect/module-info.java b/incubator/exporter-otlp/src/main/moditect/module-info.java index 535428885e..5f3622e7c7 100644 --- a/incubator/exporter-otlp/src/main/moditect/module-info.java +++ b/incubator/exporter-otlp/src/main/moditect/module-info.java @@ -17,6 +17,8 @@ requires java.net.http; requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.exporter.otlp.config; + provides io.aklivity.zilla.runtime.engine.exporter.ExporterFactorySpi with io.aklivity.zilla.runtime.exporter.otlp.internal.OtlpExporterFactorySpi; diff --git a/incubator/exporter-otlp/src/test/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OltpOptionsConfigAdapterTest.java b/incubator/exporter-otlp/src/test/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OltpOptionsConfigAdapterTest.java index 93730297cd..21a94a613f 100644 --- a/incubator/exporter-otlp/src/test/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OltpOptionsConfigAdapterTest.java +++ b/incubator/exporter-otlp/src/test/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OltpOptionsConfigAdapterTest.java @@ -14,7 +14,7 @@ */ package 
io.aklivity.zilla.runtime.exporter.otlp.internal.config; -import static io.aklivity.zilla.runtime.exporter.otlp.internal.config.OtlpOptionsConfig.OtlpSignalsConfig.METRICS; +import static io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOptionsConfig.OtlpSignalsConfig.METRICS; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -31,6 +31,10 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpEndpointConfig; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOptionsConfig; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOverridesConfig; + public class OltpOptionsConfigAdapterTest { private Jsonb jsonb; diff --git a/incubator/exporter-otlp/src/test/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpExporterConfigTest.java b/incubator/exporter-otlp/src/test/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpExporterConfigTest.java index 1c7404b218..7ecb23c681 100644 --- a/incubator/exporter-otlp/src/test/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpExporterConfigTest.java +++ b/incubator/exporter-otlp/src/test/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpExporterConfigTest.java @@ -14,7 +14,7 @@ */ package io.aklivity.zilla.runtime.exporter.otlp.internal.config; -import static io.aklivity.zilla.runtime.exporter.otlp.internal.config.OtlpOptionsConfig.OtlpSignalsConfig.METRICS; +import static io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOptionsConfig.OtlpSignalsConfig.METRICS; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; @@ -24,6 +24,9 @@ import org.junit.Test; import io.aklivity.zilla.runtime.engine.config.ExporterConfig; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpEndpointConfig; +import 
io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOptionsConfig; +import io.aklivity.zilla.runtime.exporter.otlp.config.OtlpOverridesConfig; public class OtlpExporterConfigTest { diff --git a/pom.xml b/pom.xml index 50eda46dcc..1e1a529cb8 100644 --- a/pom.xml +++ b/pom.xml @@ -249,6 +249,7 @@ compile + true diff --git a/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemOptionsConfig.java b/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/config/FileSystemOptionsConfig.java similarity index 95% rename from runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemOptionsConfig.java rename to runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/config/FileSystemOptionsConfig.java index 54444b9568..bbd1d782cd 100644 --- a/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemOptionsConfig.java +++ b/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/config/FileSystemOptionsConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.filesystem.internal.config; +package io.aklivity.zilla.runtime.binding.filesystem.config; import java.net.URI; import java.nio.file.FileSystem; diff --git a/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemSymbolicLinksConfig.java b/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/config/FileSystemSymbolicLinksConfig.java similarity index 90% rename from runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemSymbolicLinksConfig.java rename to runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/config/FileSystemSymbolicLinksConfig.java index 47e236df9c..d346dc56e1 100644 --- a/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemSymbolicLinksConfig.java +++ b/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/config/FileSystemSymbolicLinksConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.filesystem.internal.config; +package io.aklivity.zilla.runtime.binding.filesystem.config; public enum FileSystemSymbolicLinksConfig { diff --git a/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemBindingConfig.java b/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemBindingConfig.java index 9d34b0746b..85b701bd17 100644 --- a/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemBindingConfig.java +++ b/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemBindingConfig.java @@ -14,6 +14,7 @@ */ package io.aklivity.zilla.runtime.binding.filesystem.internal.config; +import io.aklivity.zilla.runtime.binding.filesystem.config.FileSystemOptionsConfig; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; diff --git a/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemOptionsConfigAdapter.java b/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemOptionsConfigAdapter.java index 7318486e71..4b22e9ca73 100644 --- a/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemOptionsConfigAdapter.java +++ b/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemOptionsConfigAdapter.java @@ -14,7 +14,7 @@ */ package io.aklivity.zilla.runtime.binding.filesystem.internal.config; -import static io.aklivity.zilla.runtime.binding.filesystem.internal.config.FileSystemSymbolicLinksConfig.IGNORE; +import static io.aklivity.zilla.runtime.binding.filesystem.config.FileSystemSymbolicLinksConfig.IGNORE; import 
java.net.URI; @@ -23,6 +23,8 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.filesystem.config.FileSystemOptionsConfig; +import io.aklivity.zilla.runtime.binding.filesystem.config.FileSystemSymbolicLinksConfig; import io.aklivity.zilla.runtime.binding.filesystem.internal.FileSystemBinding; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; diff --git a/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/stream/FileSystemServerFactory.java b/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/stream/FileSystemServerFactory.java index 0b1e29a1bc..f19ff2d0c9 100644 --- a/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/stream/FileSystemServerFactory.java +++ b/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/stream/FileSystemServerFactory.java @@ -14,7 +14,7 @@ */ package io.aklivity.zilla.runtime.binding.filesystem.internal.stream; -import static io.aklivity.zilla.runtime.binding.filesystem.internal.config.FileSystemSymbolicLinksConfig.IGNORE; +import static io.aklivity.zilla.runtime.binding.filesystem.config.FileSystemSymbolicLinksConfig.IGNORE; import static io.aklivity.zilla.runtime.engine.budget.BudgetDebitor.NO_DEBITOR_INDEX; import static java.nio.file.LinkOption.NOFOLLOW_LINKS; import static java.time.Instant.now; @@ -41,11 +41,11 @@ import org.agrona.collections.Long2ObjectHashMap; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.filesystem.config.FileSystemOptionsConfig; import io.aklivity.zilla.runtime.binding.filesystem.internal.FileSystemBinding; import io.aklivity.zilla.runtime.binding.filesystem.internal.FileSystemConfiguration; import 
io.aklivity.zilla.runtime.binding.filesystem.internal.FileSystemWatcher; import io.aklivity.zilla.runtime.binding.filesystem.internal.config.FileSystemBindingConfig; -import io.aklivity.zilla.runtime.binding.filesystem.internal.config.FileSystemOptionsConfig; import io.aklivity.zilla.runtime.binding.filesystem.internal.types.FileSystemCapabilities; import io.aklivity.zilla.runtime.binding.filesystem.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.filesystem.internal.types.OctetsFW; diff --git a/runtime/binding-filesystem/src/main/moditect/module-info.java b/runtime/binding-filesystem/src/main/moditect/module-info.java index b58f5e84fe..3e6f268443 100644 --- a/runtime/binding-filesystem/src/main/moditect/module-info.java +++ b/runtime/binding-filesystem/src/main/moditect/module-info.java @@ -16,6 +16,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.filesystem.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.filesystem.internal.FileSystemBindingFactorySpi; diff --git a/runtime/binding-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemOptionsConfigAdapterTest.java b/runtime/binding-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemOptionsConfigAdapterTest.java index 3923795dee..593263191d 100644 --- a/runtime/binding-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemOptionsConfigAdapterTest.java +++ b/runtime/binding-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/filesystem/internal/config/FileSystemOptionsConfigAdapterTest.java @@ -28,6 +28,9 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.filesystem.config.FileSystemOptionsConfig; +import io.aklivity.zilla.runtime.binding.filesystem.config.FileSystemSymbolicLinksConfig; + public class 
FileSystemOptionsConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaConditionConfig.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaConditionConfig.java similarity index 84% rename from runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaConditionConfig.java rename to runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaConditionConfig.java index 59ba0f1ba3..1264c4dca1 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaConditionConfig.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaConditionConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.grpc.kafka.config; import java.util.Map; @@ -23,12 +23,12 @@ public final class GrpcKafkaConditionConfig extends ConditionConfig { public final String service; public final String method; - public final Map metadata; + public final Map metadata; public GrpcKafkaConditionConfig( String service, String method, - Map metadata) + Map metadata) { this.service = service; this.method = method; diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaCorrelationConfig.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaCorrelationConfig.java similarity index 94% rename from runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaCorrelationConfig.java rename to runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaCorrelationConfig.java index 782d48cc7d..b56afa2e24 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaCorrelationConfig.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaCorrelationConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.grpc.kafka.config; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String16FW; diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaIdempotencyConfig.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaIdempotencyConfig.java similarity index 92% rename from runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaIdempotencyConfig.java rename to runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaIdempotencyConfig.java index 29f6bbf60d..bacda2fb7b 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaIdempotencyConfig.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaIdempotencyConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.grpc.kafka.config; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String8FW; diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaMetadataValue.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaMetadataValueConfig.java similarity index 86% rename from runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaMetadataValue.java rename to runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaMetadataValueConfig.java index aab70ed197..cc66e83413 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaMetadataValue.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaMetadataValueConfig.java @@ -12,17 +12,17 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.grpc.kafka.config; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String16FW; -public class GrpcKafkaMetadataValue +public class GrpcKafkaMetadataValueConfig { public final String16FW textValue; public final String16FW base64Value; - public GrpcKafkaMetadataValue( + public GrpcKafkaMetadataValueConfig( String16FW textValue, String16FW base64Value) { diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaOptionsConfig.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaOptionsConfig.java similarity index 94% rename from runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaOptionsConfig.java rename to runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaOptionsConfig.java index 7723d4ec35..61ffd699f4 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaOptionsConfig.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaOptionsConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.grpc.kafka.config; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaReliabilityConfig.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaReliabilityConfig.java similarity index 93% rename from runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaReliabilityConfig.java rename to runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaReliabilityConfig.java index 3c64395f2c..9cf93d65eb 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaReliabilityConfig.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/config/GrpcKafkaReliabilityConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.grpc.kafka.config; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String8FW; diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaBindingConfig.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaBindingConfig.java index 07da27d9c7..92680945f9 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaBindingConfig.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaBindingConfig.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Optional; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaOptionsConfig; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.stream.GrpcBeginExFW; diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaConditionConfigAdapter.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaConditionConfigAdapter.java index 9dc04e65f5..fa829dc196 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaConditionConfigAdapter.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaConditionConfigAdapter.java @@ -29,13 +29,14 @@ import org.agrona.collections.Object2ObjectHashMap; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaConditionConfig; +import 
io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaMetadataValueConfig; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.GrpcKafkaBinding; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String8FW; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; - public final class GrpcKafkaConditionConfigAdapter implements ConditionConfigAdapterSpi, JsonbAdapter { private static final Pattern METHOD_PATTERN = Pattern.compile("^(?[^/]+)/(?[^/]+)"); @@ -104,7 +105,7 @@ public ConditionConfig adaptFromJson( ? object.getJsonObject(METADATA_NAME) : null; - final Map newMetadata = new Object2ObjectHashMap<>(); + final Map newMetadata = new Object2ObjectHashMap<>(); if (metadata != null) { @@ -129,7 +130,7 @@ public ConditionConfig adaptFromJson( break; } - GrpcKafkaMetadataValue metadataValue = new GrpcKafkaMetadataValue(new String16FW(textValue), + GrpcKafkaMetadataValueConfig metadataValue = new GrpcKafkaMetadataValueConfig(new String16FW(textValue), new String16FW(base64Value)); newMetadata.put(key, metadataValue); }); diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaConditionMatcher.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaConditionMatcher.java index 51445018e4..63dc15005c 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaConditionMatcher.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaConditionMatcher.java @@ -20,6 +20,8 @@ import org.agrona.DirectBuffer; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaConditionConfig; +import 
io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaMetadataValueConfig; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String8FW; @@ -31,7 +33,7 @@ public final class GrpcKafkaConditionMatcher private final String16FW service; private final String16FW method; - private final Map metadataMatch; + private final Map metadataMatch; public GrpcKafkaConditionMatcher( GrpcKafkaConditionConfig condition) @@ -50,12 +52,12 @@ public boolean matches( if (metadataMatch != null) { - for (Map.Entry entry : metadataMatch.entrySet()) + for (Map.Entry entry : metadataMatch.entrySet()) { final DirectBuffer name = entry.getKey().value(); final GrpcMetadataFW metadata = metadataHeaders.matchFirst(h -> name.compareTo(h.name().value()) == 0); - final GrpcKafkaMetadataValue value = entry.getValue(); + final GrpcKafkaMetadataValueConfig value = entry.getValue(); final DirectBuffer matcher = metadata != null && metadata.type().get() == BASE64 ? 
value.base64Value.value() : value.textValue.value(); diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaOptionsConfigAdapter.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaOptionsConfigAdapter.java index c55c76e888..ffdf1991e8 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaOptionsConfigAdapter.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaOptionsConfigAdapter.java @@ -20,6 +20,10 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaCorrelationConfig; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaIdempotencyConfig; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaOptionsConfig; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaReliabilityConfig; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.GrpcKafkaBinding; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String8FW; diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaRouteConfig.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaRouteConfig.java index 4f3b96f7da..6cd0342905 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaRouteConfig.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaRouteConfig.java @@ -24,6 +24,8 @@ import java.util.regex.MatchResult; import 
java.util.stream.Collectors; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaConditionConfig; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaOptionsConfig; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.stream.GrpcMetadataFW; diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaWithProduceResult.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaWithProduceResult.java index d6a1b03032..a5ab3bbc68 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaWithProduceResult.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaWithProduceResult.java @@ -20,6 +20,7 @@ import org.agrona.DirectBuffer; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaCorrelationConfig; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.KafkaAckMode; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.KafkaAckModeFW; diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaWithResolver.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaWithResolver.java index 22d39de0e1..dd84c6acf1 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaWithResolver.java +++ 
b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaWithResolver.java @@ -25,6 +25,7 @@ import org.agrona.DirectBuffer; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaOptionsConfig; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.stream.GrpcKafkaIdHelper; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.KafkaAckMode; diff --git a/runtime/binding-grpc-kafka/src/main/moditect/module-info.java b/runtime/binding-grpc-kafka/src/main/moditect/module-info.java index 8a6050d029..004272de18 100644 --- a/runtime/binding-grpc-kafka/src/main/moditect/module-info.java +++ b/runtime/binding-grpc-kafka/src/main/moditect/module-info.java @@ -16,6 +16,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.grpc.kafka.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.grpc.kafka.internal.GrpcKafkaBindingFactorySpi; diff --git a/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/config/GrpcKafkaConditionConfigAdapterTest.java b/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/config/GrpcKafkaConditionConfigAdapterTest.java index d65ed3289f..dcf0960e2e 100644 --- a/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/config/GrpcKafkaConditionConfigAdapterTest.java +++ b/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/config/GrpcKafkaConditionConfigAdapterTest.java @@ -30,9 +30,9 @@ import org.junit.Before; import org.junit.Test; -import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config.GrpcKafkaConditionConfig; +import 
io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaConditionConfig; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaMetadataValueConfig; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config.GrpcKafkaConditionConfigAdapter; -import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config.GrpcKafkaMetadataValue; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String8FW; @@ -72,7 +72,7 @@ public void shouldReadCondition() public void shouldWriteCondition() { GrpcKafkaConditionConfig condition = new GrpcKafkaConditionConfig("test", "*", - singletonMap(new String8FW("custom"), new GrpcKafkaMetadataValue(new String16FW("test"), + singletonMap(new String8FW("custom"), new GrpcKafkaMetadataValueConfig(new String16FW("test"), new String16FW(Base64.getUrlEncoder().encodeToString("test".getBytes()))))); String text = jsonb.toJson(condition); diff --git a/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/config/GrpcKafkaOptionsConfigAdapterTest.java b/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/config/GrpcKafkaOptionsConfigAdapterTest.java index 6e1596bac5..f6a6fb4165 100644 --- a/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/config/GrpcKafkaOptionsConfigAdapterTest.java +++ b/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/config/GrpcKafkaOptionsConfigAdapterTest.java @@ -26,11 +26,11 @@ import org.junit.Before; import org.junit.Test; -import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config.GrpcKafkaCorrelationConfig; -import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config.GrpcKafkaIdempotencyConfig; -import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config.GrpcKafkaOptionsConfig; +import 
io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaCorrelationConfig; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaIdempotencyConfig; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaOptionsConfig; +import io.aklivity.zilla.runtime.binding.grpc.kafka.config.GrpcKafkaReliabilityConfig; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config.GrpcKafkaOptionsConfigAdapter; -import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.config.GrpcKafkaReliabilityConfig; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String8FW; diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcConditionConfig.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcConditionConfig.java similarity index 84% rename from runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcConditionConfig.java rename to runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcConditionConfig.java index d287be644c..dc8e0a3f80 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcConditionConfig.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcConditionConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.grpc.internal.config; +package io.aklivity.zilla.runtime.binding.grpc.config; import java.util.Map; @@ -22,11 +22,11 @@ public final class GrpcConditionConfig extends ConditionConfig { public final String method; - public final Map metadata; + public final Map metadata; public GrpcConditionConfig( String method, - Map metadata) + Map metadata) { this.method = method; this.metadata = metadata; diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcMetadataValue.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcMetadataValueConfig.java similarity index 87% rename from runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcMetadataValue.java rename to runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcMetadataValueConfig.java index 7321cb1419..c90a8e2023 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcMetadataValue.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcMetadataValueConfig.java @@ -12,16 +12,16 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.grpc.internal.config; +package io.aklivity.zilla.runtime.binding.grpc.config; import io.aklivity.zilla.runtime.binding.grpc.internal.types.String16FW; -public class GrpcMetadataValue +public class GrpcMetadataValueConfig { public final String16FW textValue; public final String16FW base64Value; - public GrpcMetadataValue( + public GrpcMetadataValueConfig( String16FW textValue, String16FW base64Value) { diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcMethodConfig.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcMethodConfig.java similarity index 92% rename from runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcMethodConfig.java rename to runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcMethodConfig.java index f2beab572d..47f1e8648a 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcMethodConfig.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcMethodConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.grpc.internal.config; +package io.aklivity.zilla.runtime.binding.grpc.config; public class GrpcMethodConfig diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfig.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcOptionsConfig.java similarity index 93% rename from runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfig.java rename to runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcOptionsConfig.java index 06831378e7..e00944534a 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfig.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcOptionsConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.grpc.internal.config; +package io.aklivity.zilla.runtime.binding.grpc.config; import java.util.List; diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcProtobufConfig.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcProtobufConfig.java similarity index 93% rename from runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcProtobufConfig.java rename to runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcProtobufConfig.java index 736b42376d..00426f7747 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcProtobufConfig.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcProtobufConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.grpc.internal.config; +package io.aklivity.zilla.runtime.binding.grpc.config; import java.util.Set; diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcServiceConfig.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcServiceConfig.java similarity index 93% rename from runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcServiceConfig.java rename to runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcServiceConfig.java index 50adada421..2ac7caefce 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcServiceConfig.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/config/GrpcServiceConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.grpc.internal.config; +package io.aklivity.zilla.runtime.binding.grpc.config; import java.util.Set; diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcBindingConfig.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcBindingConfig.java index 0eba17cec3..323417f06a 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcBindingConfig.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcBindingConfig.java @@ -35,8 +35,8 @@ import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; - - +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcMethodConfig; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcOptionsConfig; import io.aklivity.zilla.runtime.binding.grpc.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.grpc.internal.types.HttpHeaderFW; import io.aklivity.zilla.runtime.binding.grpc.internal.types.String16FW; @@ -336,7 +336,7 @@ private long parsePeriod( milliseconds = TimeUnit.SECONDS.toMillis(number); break; case "m": - milliseconds = milliseconds; + milliseconds = TimeUnit.MILLISECONDS.toMillis(number); break; case "u": milliseconds = TimeUnit.MICROSECONDS.toMillis(number); diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcConditionConfigAdapter.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcConditionConfigAdapter.java index 2c1280170d..6c3db67c6f 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcConditionConfigAdapter.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcConditionConfigAdapter.java @@ -26,6 +26,8 @@ import 
org.agrona.collections.Object2ObjectHashMap; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcConditionConfig; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcMetadataValueConfig; import io.aklivity.zilla.runtime.binding.grpc.internal.GrpcBinding; import io.aklivity.zilla.runtime.binding.grpc.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.grpc.internal.types.String8FW; @@ -87,7 +89,7 @@ public ConditionConfig adaptFromJson( ? object.getJsonObject(METADATA_NAME) : null; - final Map newMetadata = new Object2ObjectHashMap<>(); + final Map newMetadata = new Object2ObjectHashMap<>(); if (metadata != null) { @@ -118,7 +120,7 @@ public ConditionConfig adaptFromJson( throw new IllegalArgumentException("Unexpected type: " + valueType); } - GrpcMetadataValue metadataValue = new GrpcMetadataValue(new String16FW(textValue), + GrpcMetadataValueConfig metadataValue = new GrpcMetadataValueConfig(new String16FW(textValue), new String16FW(base64Value)); newMetadata.put(key, metadataValue); }); diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcConditionMatcher.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcConditionMatcher.java index 8c37b735b6..9ab89f2c0c 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcConditionMatcher.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcConditionMatcher.java @@ -22,6 +22,8 @@ import org.agrona.DirectBuffer; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcConditionConfig; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcMetadataValueConfig; import io.aklivity.zilla.runtime.binding.grpc.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.grpc.internal.types.String8FW; import io.aklivity.zilla.runtime.binding.grpc.internal.types.stream.GrpcMetadataFW; @@ -29,7 
+31,7 @@ public final class GrpcConditionMatcher { private final Matcher method; - private final Map metadataMatch; + private final Map metadataMatch; public GrpcConditionMatcher( GrpcConditionConfig condition) @@ -47,12 +49,12 @@ public boolean matches( if (metadataMatch != null) { - for (Map.Entry entry : metadataMatch.entrySet()) + for (Map.Entry entry : metadataMatch.entrySet()) { final DirectBuffer name = entry.getKey().value(); final GrpcMetadataFW metadata = metadataHeaders.matchFirst(h -> name.compareTo(h.name().value()) == 0); - final GrpcMetadataValue value = entry.getValue(); + final GrpcMetadataValueConfig value = entry.getValue(); final DirectBuffer matcher = metadata != null && metadata.type().get() == BASE64 ? value.base64Value.value() : value.textValue.value(); diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfigAdapter.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfigAdapter.java index 65d6139706..c2a55f352c 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfigAdapter.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfigAdapter.java @@ -36,6 +36,9 @@ import org.antlr.v4.runtime.CommonTokenStream; import org.antlr.v4.runtime.tree.ParseTreeWalker; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcOptionsConfig; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcProtobufConfig; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcServiceConfig; import io.aklivity.zilla.runtime.binding.grpc.internal.GrpcBinding; import io.aklivity.zilla.runtime.binding.grpc.internal.parser.Protobuf3Lexer; import io.aklivity.zilla.runtime.binding.grpc.internal.parser.Protobuf3Parser; @@ -47,7 +50,6 @@ public final class GrpcOptionsConfigAdapter implements OptionsConfigAdapterSpi, { private 
static final String SERVICES_NAME = "services"; private Function readURL; - private ConfigAdapterContext context; @Override public Kind kind() diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcRouteConfig.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcRouteConfig.java index ffedbce01a..15b2b8d5e2 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcRouteConfig.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcRouteConfig.java @@ -19,6 +19,7 @@ import java.util.List; import java.util.function.LongPredicate; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcConditionConfig; import io.aklivity.zilla.runtime.binding.grpc.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.grpc.internal.types.stream.GrpcMetadataFW; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcServiceDefinitionListener.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcServiceDefinitionListener.java index f789fb5ff2..8ca4e97271 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcServiceDefinitionListener.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcServiceDefinitionListener.java @@ -18,6 +18,8 @@ import org.agrona.collections.ObjectHashSet; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcMethodConfig; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcServiceConfig; import io.aklivity.zilla.runtime.binding.grpc.internal.parser.Protobuf3BaseListener; import io.aklivity.zilla.runtime.binding.grpc.internal.parser.Protobuf3Parser; diff --git 
a/runtime/binding-grpc/src/main/moditect/module-info.java b/runtime/binding-grpc/src/main/moditect/module-info.java index e1f6d49c84..f267ffde54 100644 --- a/runtime/binding-grpc/src/main/moditect/module-info.java +++ b/runtime/binding-grpc/src/main/moditect/module-info.java @@ -17,6 +17,8 @@ requires org.antlr.antlr4.runtime; requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.grpc.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.grpc.internal.GrpcBindingFactorySpi; diff --git a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfigAdapterTest.java b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfigAdapterTest.java index cacad23831..288ada9d69 100644 --- a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfigAdapterTest.java +++ b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcOptionsConfigAdapterTest.java @@ -37,6 +37,10 @@ import org.mockito.junit.MockitoJUnit; import org.mockito.junit.MockitoRule; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcMethodConfig; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcOptionsConfig; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcProtobufConfig; +import io.aklivity.zilla.runtime.binding.grpc.config.GrpcServiceConfig; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; import io.aklivity.zilla.runtime.engine.internal.config.OptionsAdapter; diff --git a/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionConfig.java 
b/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/config/HttpFileSystemConditionConfig.java similarity index 92% rename from runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionConfig.java rename to runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/config/HttpFileSystemConditionConfig.java index 72dac40404..fe70c5b409 100644 --- a/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionConfig.java +++ b/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/config/HttpFileSystemConditionConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.binding.http.filesystem.internal.config; +package io.aklivity.zilla.runtime.binding.http.filesystem.config; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionConfigAdapter.java b/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionConfigAdapter.java index a4167556c2..4e9830d78f 100644 --- a/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionConfigAdapter.java +++ b/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionConfigAdapter.java @@ -19,6 +19,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import 
io.aklivity.zilla.runtime.binding.http.filesystem.config.HttpFileSystemConditionConfig; import io.aklivity.zilla.runtime.binding.http.filesystem.internal.HttpFileSystemBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; diff --git a/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionMatcher.java b/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionMatcher.java index 8934184fdc..1e7f576090 100644 --- a/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionMatcher.java +++ b/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionMatcher.java @@ -18,6 +18,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.aklivity.zilla.runtime.binding.http.filesystem.config.HttpFileSystemConditionConfig; + public final class HttpFileSystemConditionMatcher { private final Matcher path; diff --git a/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemRouteConfig.java b/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemRouteConfig.java index 01838d6e0a..ae18b63032 100644 --- a/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemRouteConfig.java +++ b/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemRouteConfig.java @@ -21,6 +21,7 @@ import java.util.function.Consumer; import java.util.function.LongPredicate; +import 
io.aklivity.zilla.runtime.binding.http.filesystem.config.HttpFileSystemConditionConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; diff --git a/runtime/binding-http-filesystem/src/main/moditect/module-info.java b/runtime/binding-http-filesystem/src/main/moditect/module-info.java index 03350365b3..f6e5b37c82 100644 --- a/runtime/binding-http-filesystem/src/main/moditect/module-info.java +++ b/runtime/binding-http-filesystem/src/main/moditect/module-info.java @@ -16,6 +16,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.http.filesystem.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.http.filesystem.internal.HttpFileSystemBindingFactorySpi; diff --git a/runtime/binding-http-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionConfigAdapterTest.java b/runtime/binding-http-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionConfigAdapterTest.java index a3880950d1..c6fa254b76 100644 --- a/runtime/binding-http-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionConfigAdapterTest.java +++ b/runtime/binding-http-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemConditionConfigAdapterTest.java @@ -26,6 +26,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.http.filesystem.config.HttpFileSystemConditionConfig; + public class HttpFileSystemConditionConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionConfig.java 
b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/config/HttpKafkaConditionConfig.java similarity index 93% rename from runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionConfig.java rename to runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/config/HttpKafkaConditionConfig.java index 43511b4e65..18a69eddde 100644 --- a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionConfig.java +++ b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/config/HttpKafkaConditionConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.binding.http.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.http.kafka.config; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaCorrelationConfig.java b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/config/HttpKafkaCorrelationConfig.java similarity index 93% rename from runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaCorrelationConfig.java rename to runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/config/HttpKafkaCorrelationConfig.java index 6b0460fbfb..e418bd0de1 100644 --- a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaCorrelationConfig.java +++ b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/config/HttpKafkaCorrelationConfig.java @@ -12,7 +12,7 @@ 
* WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.binding.http.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.http.kafka.config; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.String16FW; diff --git a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaIdempotencyConfig.java b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/config/HttpKafkaIdempotencyConfig.java similarity index 92% rename from runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaIdempotencyConfig.java rename to runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/config/HttpKafkaIdempotencyConfig.java index 14d3ec912d..658af4d007 100644 --- a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaIdempotencyConfig.java +++ b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/config/HttpKafkaIdempotencyConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.http.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.http.kafka.config; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.String8FW; diff --git a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaOptionsConfig.java b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/config/HttpKafkaOptionsConfig.java similarity index 93% rename from runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaOptionsConfig.java rename to runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/config/HttpKafkaOptionsConfig.java index 7e71cf1266..efb1b23138 100644 --- a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaOptionsConfig.java +++ b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/config/HttpKafkaOptionsConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.http.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.http.kafka.config; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; diff --git a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaBindingConfig.java b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaBindingConfig.java index 8bd37ec6d5..3f563112a9 100644 --- a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaBindingConfig.java +++ b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaBindingConfig.java @@ -25,6 +25,7 @@ import org.agrona.AsciiSequenceView; import org.agrona.DirectBuffer; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaOptionsConfig; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.HttpHeaderFW; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.String8FW; diff --git a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionConfigAdapter.java b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionConfigAdapter.java index 0bca44d78a..c7c4dfff7d 100644 --- a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionConfigAdapter.java +++ b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionConfigAdapter.java @@ -19,6 +19,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaConditionConfig; 
import io.aklivity.zilla.runtime.binding.http.kafka.internal.HttpKafkaBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; diff --git a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionMatcher.java b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionMatcher.java index 6b55540ab5..865e6e99cb 100644 --- a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionMatcher.java +++ b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionMatcher.java @@ -18,6 +18,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaConditionConfig; + public final class HttpKafkaConditionMatcher { private final Matcher method; diff --git a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaOptionsConfigAdapter.java b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaOptionsConfigAdapter.java index 582e610cab..60aee03297 100644 --- a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaOptionsConfigAdapter.java +++ b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaOptionsConfigAdapter.java @@ -19,6 +19,9 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaCorrelationConfig; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaIdempotencyConfig; +import 
io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaOptionsConfig; import io.aklivity.zilla.runtime.binding.http.kafka.internal.HttpKafkaBinding; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.String8FW; diff --git a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaRouteConfig.java b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaRouteConfig.java index c7a6cbfd7f..f1d554eb22 100644 --- a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaRouteConfig.java +++ b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaRouteConfig.java @@ -24,6 +24,8 @@ import java.util.regex.MatchResult; import java.util.stream.Collectors; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaConditionConfig; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaOptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; import io.aklivity.zilla.runtime.engine.util.function.LongObjectBiFunction; diff --git a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaWithProduceResult.java b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaWithProduceResult.java index b27c9fe992..0ac118be56 100644 --- a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaWithProduceResult.java +++ b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaWithProduceResult.java @@ -20,6 +20,7 @@ import org.agrona.DirectBuffer; 
import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaCorrelationConfig; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.HttpHeaderFW; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.KafkaAckMode; diff --git a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaWithResolver.java b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaWithResolver.java index 9ba9e3ace4..913828184d 100644 --- a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaWithResolver.java +++ b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaWithResolver.java @@ -28,6 +28,7 @@ import org.agrona.DirectBuffer; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaOptionsConfig; import io.aklivity.zilla.runtime.binding.http.kafka.internal.stream.HttpKafkaEtagHelper; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.HttpHeaderFW; diff --git a/runtime/binding-http-kafka/src/main/moditect/module-info.java b/runtime/binding-http-kafka/src/main/moditect/module-info.java index 58b5fd9052..7bbb7ca55b 100644 --- a/runtime/binding-http-kafka/src/main/moditect/module-info.java +++ b/runtime/binding-http-kafka/src/main/moditect/module-info.java @@ -16,6 +16,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.http.kafka.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.http.kafka.internal.HttpKafkaBindingFactorySpi; diff --git 
a/runtime/binding-http-kafka/src/test/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionConfigAdapterTest.java b/runtime/binding-http-kafka/src/test/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionConfigAdapterTest.java index 70a5021255..3b10e971e2 100644 --- a/runtime/binding-http-kafka/src/test/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionConfigAdapterTest.java +++ b/runtime/binding-http-kafka/src/test/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaConditionConfigAdapterTest.java @@ -26,6 +26,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaConditionConfig; + public class HttpKafkaConditionConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-http-kafka/src/test/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaOptionsConfigAdapterTest.java b/runtime/binding-http-kafka/src/test/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaOptionsConfigAdapterTest.java index 0d3038589d..f3ed5bc871 100644 --- a/runtime/binding-http-kafka/src/test/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaOptionsConfigAdapterTest.java +++ b/runtime/binding-http-kafka/src/test/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaOptionsConfigAdapterTest.java @@ -26,6 +26,9 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaCorrelationConfig; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaIdempotencyConfig; +import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaOptionsConfig; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.http.kafka.internal.types.String8FW; diff --git 
a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpAccessControlConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfig.java similarity index 97% rename from runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpAccessControlConfig.java rename to runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfig.java index b8e87df4e6..bea7f0940d 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpAccessControlConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfig.java @@ -13,10 +13,10 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.binding.http.internal.config; +package io.aklivity.zilla.runtime.binding.http.config; -import static io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig.HttpPolicyConfig.CROSS_ORIGIN; -import static io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig.HttpPolicyConfig.SAME_ORIGIN; +import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.CROSS_ORIGIN; +import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.SAME_ORIGIN; import static java.lang.ThreadLocal.withInitial; import static java.util.Collections.unmodifiableSet; import static java.util.regex.Pattern.CASE_INSENSITIVE; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpAuthorizationConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfig.java similarity index 97% rename from 
runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpAuthorizationConfig.java rename to runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfig.java index 51604815e1..f734483f77 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpAuthorizationConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.binding.http.internal.config; +package io.aklivity.zilla.runtime.binding.http.config; import java.util.List; import java.util.function.Function; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfig.java similarity index 93% rename from runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfig.java rename to runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfig.java index 084a1bea60..f3a3d42a96 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.http.internal.config; +package io.aklivity.zilla.runtime.binding.http.config; import java.util.Map; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfig.java similarity index 95% rename from runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfig.java rename to runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfig.java index cd3e53b306..fdcef5be3f 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.http.internal.config; +package io.aklivity.zilla.runtime.binding.http.config; import java.util.Map; import java.util.SortedSet; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpVersion.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpVersion.java similarity index 94% rename from runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpVersion.java rename to runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpVersion.java index 70510ba4d0..8008b3d1a2 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpVersion.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpVersion.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.http.internal.config; +package io.aklivity.zilla.runtime.binding.http.config; public enum HttpVersion { diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpBindingConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpBindingConfig.java index 2a64bcb2e6..b58dd778a7 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpBindingConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpBindingConfig.java @@ -15,8 +15,8 @@ */ package io.aklivity.zilla.runtime.binding.http.internal.config; -import static io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig.HttpPolicyConfig.SAME_ORIGIN; -import static io.aklivity.zilla.runtime.binding.http.internal.config.HttpAuthorizationConfig.DEFAULT_CREDENTIALS; +import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.SAME_ORIGIN; +import static io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.DEFAULT_CREDENTIALS; import static java.util.EnumSet.allOf; import static java.util.stream.Collectors.toList; @@ -28,8 +28,11 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpAuthorizationConfig.HttpCredentialsConfig; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpAuthorizationConfig.HttpPatternConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.HttpCredentialsConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.HttpPatternConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpOptionsConfig; +import 
io.aklivity.zilla.runtime.binding.http.config.HttpVersion; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapter.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapter.java index 17cc93098f..f13363c6b1 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapter.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapter.java @@ -24,6 +24,7 @@ import jakarta.json.JsonString; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.http.config.HttpConditionConfig; import io.aklivity.zilla.runtime.binding.http.internal.HttpBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionMatcher.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionMatcher.java index 8f1d962c1b..253040c8b3 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionMatcher.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionMatcher.java @@ -21,6 +21,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.aklivity.zilla.runtime.binding.http.config.HttpConditionConfig; + public final class HttpConditionMatcher { private final Map headersMatch; diff --git 
a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapter.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapter.java index 1a700f1548..f903680c88 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapter.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapter.java @@ -15,8 +15,8 @@ */ package io.aklivity.zilla.runtime.binding.http.internal.config; -import static io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig.HttpPolicyConfig.CROSS_ORIGIN; -import static io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig.HttpPolicyConfig.SAME_ORIGIN; +import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.CROSS_ORIGIN; +import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.SAME_ORIGIN; import java.time.Duration; import java.util.ArrayList; @@ -37,11 +37,15 @@ import jakarta.json.JsonString; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpAllowConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpExposeConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.HttpCredentialsConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.HttpPatternConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpOptionsConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpVersion; import 
io.aklivity.zilla.runtime.binding.http.internal.HttpBinding; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig.HttpAllowConfig; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig.HttpExposeConfig; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpAuthorizationConfig.HttpCredentialsConfig; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpAuthorizationConfig.HttpPatternConfig; import io.aklivity.zilla.runtime.binding.http.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.http.internal.types.String8FW; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRouteConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRouteConfig.java index 8a5fe931c2..1249894a7e 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRouteConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRouteConfig.java @@ -21,6 +21,7 @@ import java.util.function.Function; import java.util.function.LongPredicate; +import io.aklivity.zilla.runtime.binding.http.config.HttpConditionConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpClientFactory.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpClientFactory.java index 8660bcc2ff..51c51ca383 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpClientFactory.java +++ 
b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpClientFactory.java @@ -15,8 +15,8 @@ */ package io.aklivity.zilla.runtime.binding.http.internal.stream; -import static io.aklivity.zilla.runtime.binding.http.internal.config.HttpVersion.HTTP_1_1; -import static io.aklivity.zilla.runtime.binding.http.internal.config.HttpVersion.HTTP_2; +import static io.aklivity.zilla.runtime.binding.http.config.HttpVersion.HTTP_1_1; +import static io.aklivity.zilla.runtime.binding.http.config.HttpVersion.HTTP_2; import static io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackContext.TE; import static io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackContext.TRAILERS; import static io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackHeaderFieldFW.HeaderFieldType.UNKNOWN; @@ -63,6 +63,7 @@ import org.agrona.collections.MutableInteger; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.http.config.HttpVersion; import io.aklivity.zilla.runtime.binding.http.internal.HttpBinding; import io.aklivity.zilla.runtime.binding.http.internal.HttpConfiguration; import io.aklivity.zilla.runtime.binding.http.internal.codec.Http2ContinuationFW; @@ -82,7 +83,6 @@ import io.aklivity.zilla.runtime.binding.http.internal.codec.Http2WindowUpdateFW; import io.aklivity.zilla.runtime.binding.http.internal.config.HttpBindingConfig; import io.aklivity.zilla.runtime.binding.http.internal.config.HttpRouteConfig; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpVersion; import io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackContext; import io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackHeaderBlockFW; import io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackHeaderFieldFW; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java 
b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java index 1679cb1172..600494953d 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java @@ -15,7 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.http.internal.stream; -import static io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig.HttpPolicyConfig.CROSS_ORIGIN; +import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.CROSS_ORIGIN; import static io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackContext.CONNECTION; import static io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackContext.KEEP_ALIVE; import static io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackContext.PROXY_CONNECTION; @@ -79,6 +79,9 @@ import org.agrona.concurrent.AtomicBuffer; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpVersion; import io.aklivity.zilla.runtime.binding.http.internal.HttpBinding; import io.aklivity.zilla.runtime.binding.http.internal.HttpConfiguration; import io.aklivity.zilla.runtime.binding.http.internal.codec.Http2ContinuationFW; @@ -96,11 +99,8 @@ import io.aklivity.zilla.runtime.binding.http.internal.codec.Http2Setting; import io.aklivity.zilla.runtime.binding.http.internal.codec.Http2SettingsFW; import io.aklivity.zilla.runtime.binding.http.internal.codec.Http2WindowUpdateFW; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig; -import 
io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig.HttpPolicyConfig; import io.aklivity.zilla.runtime.binding.http.internal.config.HttpBindingConfig; import io.aklivity.zilla.runtime.binding.http.internal.config.HttpRouteConfig; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpVersion; import io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackContext; import io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackHeaderBlockFW; import io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackHeaderFieldFW; diff --git a/runtime/binding-http/src/main/moditect/module-info.java b/runtime/binding-http/src/main/moditect/module-info.java index 2bc7a266b0..6f1b960854 100644 --- a/runtime/binding-http/src/main/moditect/module-info.java +++ b/runtime/binding-http/src/main/moditect/module-info.java @@ -17,6 +17,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.http.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.http.internal.HttpBindingFactorySpi; diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapterTest.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapterTest.java index ce24884700..5f361252a8 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapterTest.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapterTest.java @@ -28,6 +28,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.http.config.HttpConditionConfig; + public class HttpConditionConfigAdapterTest { private Jsonb jsonb; diff --git 
a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java index 0f400c028c..a3a6ed74c7 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java @@ -15,7 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.http.internal.config; -import static io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig.HttpPolicyConfig.CROSS_ORIGIN; +import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.CROSS_ORIGIN; import static java.util.Collections.singleton; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; @@ -37,10 +37,14 @@ import org.junit.Before; import org.junit.Test; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig.HttpAllowConfig; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpAccessControlConfig.HttpExposeConfig; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpAuthorizationConfig.HttpCredentialsConfig; -import io.aklivity.zilla.runtime.binding.http.internal.config.HttpAuthorizationConfig.HttpPatternConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpAllowConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpExposeConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.HttpCredentialsConfig; +import 
io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.HttpPatternConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpOptionsConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpVersion; import io.aklivity.zilla.runtime.binding.http.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.http.internal.types.String8FW; diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionConfig.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/config/KafkaGrpcConditionConfig.java similarity index 96% rename from runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionConfig.java rename to runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/config/KafkaGrpcConditionConfig.java index 2c35c77412..2a734509d3 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionConfig.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/config/KafkaGrpcConditionConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.kafka.grpc.internal.config; +package io.aklivity.zilla.runtime.binding.kafka.grpc.config; import java.util.Map; import java.util.Optional; diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcCorrelationConfig.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/config/KafkaGrpcCorrelationConfig.java similarity index 94% rename from runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcCorrelationConfig.java rename to runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/config/KafkaGrpcCorrelationConfig.java index beb3079c0b..37b41ff7f4 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcCorrelationConfig.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/config/KafkaGrpcCorrelationConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.kafka.grpc.internal.config; +package io.aklivity.zilla.runtime.binding.kafka.grpc.config; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.String16FW; diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcIdempotencyConfig.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/config/KafkaGrpcIdempotencyConfig.java similarity index 92% rename from runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcIdempotencyConfig.java rename to runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/config/KafkaGrpcIdempotencyConfig.java index 8cb93014b2..ab4ecd0cdd 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcIdempotencyConfig.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/config/KafkaGrpcIdempotencyConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.kafka.grpc.internal.config; +package io.aklivity.zilla.runtime.binding.kafka.grpc.config; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.String8FW; diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcOptionsConfig.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/config/KafkaGrpcOptionsConfig.java similarity index 94% rename from runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcOptionsConfig.java rename to runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/config/KafkaGrpcOptionsConfig.java index 2685689b17..3293a1169f 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcOptionsConfig.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/config/KafkaGrpcOptionsConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.kafka.grpc.internal.config; +package io.aklivity.zilla.runtime.binding.kafka.grpc.config; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.KafkaAckMode; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcBindingConfig.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcBindingConfig.java index b5490eb300..791c449c42 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcBindingConfig.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcBindingConfig.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Optional; +import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcOptionsConfig; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.stream.KafkaGrpcFetchHeaderHelper; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionConfigAdapter.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionConfigAdapter.java index 373bb4cbc1..e7bf19e7f7 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionConfigAdapter.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionConfigAdapter.java @@ -26,6 +26,7 @@ import org.agrona.collections.Object2ObjectHashMap; +import 
io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcConditionConfig; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.KafkaGrpcBinding; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.String8FW; diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionResolver.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionResolver.java index 9760e7ddfd..9f98b1ded9 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionResolver.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionResolver.java @@ -19,6 +19,8 @@ import org.agrona.DirectBuffer; +import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcConditionConfig; +import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcOptionsConfig; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.KafkaAckMode; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.String16FW; diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionResult.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionResult.java index d7bdb95203..6f051e90b8 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionResult.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionResult.java @@ -18,6 +18,7 @@ import org.agrona.concurrent.UnsafeBuffer; +import 
io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcCorrelationConfig; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.KafkaAckMode; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.KafkaAckModeFW; diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcOptionsConfigAdapter.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcOptionsConfigAdapter.java index d6d41192ee..1564c6af28 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcOptionsConfigAdapter.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcOptionsConfigAdapter.java @@ -21,6 +21,9 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcCorrelationConfig; +import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcIdempotencyConfig; +import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcOptionsConfig; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.KafkaGrpcBinding; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.KafkaAckMode; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.String16FW; diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcRouteConfig.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcRouteConfig.java index ba54ba363f..9216569441 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcRouteConfig.java +++ 
b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcRouteConfig.java @@ -19,6 +19,8 @@ import java.util.List; import java.util.function.LongPredicate; +import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcConditionConfig; +import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcOptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcFetchHeaderHelper.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcFetchHeaderHelper.java index 93a29e0e7f..b9a9b6ba60 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcFetchHeaderHelper.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcFetchHeaderHelper.java @@ -18,7 +18,7 @@ import java.util.Map; import java.util.function.Consumer; -import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.config.KafkaGrpcCorrelationConfig; +import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcCorrelationConfig; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.KafkaHeaderFW; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.OctetsFW; diff --git a/runtime/binding-kafka-grpc/src/main/moditect/module-info.java b/runtime/binding-kafka-grpc/src/main/moditect/module-info.java index f2019f0684..5b04be2b59 100644 --- a/runtime/binding-kafka-grpc/src/main/moditect/module-info.java +++ b/runtime/binding-kafka-grpc/src/main/moditect/module-info.java @@ -16,6 +16,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports 
io.aklivity.zilla.runtime.binding.kafka.grpc.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.kafka.grpc.internal.KafkaGrpcBindingFactorySpi; diff --git a/runtime/binding-kafka-grpc/src/test/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionConfigAdapterTest.java b/runtime/binding-kafka-grpc/src/test/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionConfigAdapterTest.java index 8fbfc5ee3d..b341a92d7a 100644 --- a/runtime/binding-kafka-grpc/src/test/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionConfigAdapterTest.java +++ b/runtime/binding-kafka-grpc/src/test/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcConditionConfigAdapterTest.java @@ -28,6 +28,7 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcConditionConfig; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.String8FW; diff --git a/runtime/binding-kafka-grpc/src/test/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcOptionsConfigAdapterTest.java b/runtime/binding-kafka-grpc/src/test/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcOptionsConfigAdapterTest.java index 3b22b93365..e5dae6f437 100644 --- a/runtime/binding-kafka-grpc/src/test/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcOptionsConfigAdapterTest.java +++ b/runtime/binding-kafka-grpc/src/test/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcOptionsConfigAdapterTest.java @@ -27,6 +27,9 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcCorrelationConfig; +import 
io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcIdempotencyConfig; +import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcOptionsConfig; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.kafka.grpc.internal.types.String8FW; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaConditionConfig.java similarity index 93% rename from runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfig.java rename to runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaConditionConfig.java index 0d222e179d..e7a22427cf 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaConditionConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.kafka.config; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaOptionsConfig.java similarity index 94% rename from runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfig.java rename to runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaOptionsConfig.java index e556f89b45..a19b7854f9 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaOptionsConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.kafka.config; import java.util.List; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaSaslConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaSaslConfig.java similarity index 94% rename from runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaSaslConfig.java rename to runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaSaslConfig.java index 8e8a5b8097..4860089a0c 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaSaslConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaSaslConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.kafka.config; public class KafkaSaslConfig { diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaTopicConfig.java similarity index 96% rename from runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicConfig.java rename to runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaTopicConfig.java index e9204fddac..a688ee438b 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/config/KafkaTopicConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.kafka.config; import java.util.Objects; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaBindingConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaBindingConfig.java index ff9e7c6d0e..52da2441c9 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaBindingConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaBindingConfig.java @@ -19,6 +19,9 @@ import java.util.List; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaOptionsConfig; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaTopicConfig; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapter.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapter.java index 46e3cdd2e5..c5d242261f 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapter.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapter.java @@ -20,6 +20,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaConditionConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import 
io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionMatcher.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionMatcher.java index 329e70b898..74802a51b6 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionMatcher.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionMatcher.java @@ -18,6 +18,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaConditionConfig; + public final class KafkaConditionMatcher { private final Matcher topicMatch; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfigAdapter.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfigAdapter.java index 8d5f24c8a2..7992783319 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfigAdapter.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfigAdapter.java @@ -26,6 +26,9 @@ import jakarta.json.JsonString; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaOptionsConfig; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaTopicConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; diff --git 
a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaRouteConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaRouteConfig.java index 83805da27a..ec6d5f54cc 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaRouteConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaRouteConfig.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.function.LongPredicate; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaConditionConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/ScramMechanism.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaScramMechanism.java similarity index 84% rename from runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/ScramMechanism.java rename to runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaScramMechanism.java index d7cbe7fa05..05bd5fcb6d 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/ScramMechanism.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaScramMechanism.java @@ -20,7 +20,7 @@ import java.util.HashMap; import java.util.Map; -public enum ScramMechanism +public enum KafkaScramMechanism { SCRAM_SHA_256("SHA-256", "HmacSHA256", 4096), SCRAM_SHA_512("SHA-512", "HmacSHA512", 4096), @@ -31,19 +31,19 @@ public enum ScramMechanism private final String macAlgorithm; private final int minIterations; - private static final Map MECHANISMS_MAP; + private static final Map 
MECHANISMS_MAP; static { - Map map = new HashMap<>(); - for (ScramMechanism mech : values()) + Map map = new HashMap<>(); + for (KafkaScramMechanism mech : values()) { map.put(mech.mechanismName, mech); } MECHANISMS_MAP = Collections.unmodifiableMap(map); } - ScramMechanism(String hashAlgorithm, String macAlgorithm, int minIterations) + KafkaScramMechanism(String hashAlgorithm, String macAlgorithm, int minIterations) { this.mechanismName = "SCRAM-" + hashAlgorithm; this.hashAlgorithm = hashAlgorithm; @@ -71,7 +71,7 @@ public int minIterations() return minIterations; } - public static ScramMechanism forMechanismName(String mechanismName) + public static KafkaScramMechanism forMechanismName(String mechanismName) { return MECHANISMS_MAP.get(mechanismName); } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicConfigAdapter.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicConfigAdapter.java index ffc1652e24..16ab5ff2f8 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicConfigAdapter.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaTopicConfigAdapter.java @@ -20,6 +20,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaTopicConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaDeltaType; import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaOffsetType; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerBootstrapFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerBootstrapFactory.java index 3354dde69a..44f10a622d 100644 --- 
a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerBootstrapFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerBootstrapFactory.java @@ -27,10 +27,10 @@ import org.agrona.collections.MutableInteger; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaTopicConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; -import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaTopicConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.types.ArrayFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaConfigFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaOffsetFW; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFetchFactory.java index 2a3109db76..9792ab6228 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFetchFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFetchFactory.java @@ -42,6 +42,7 @@ import org.agrona.collections.Int2IntHashMap; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaTopicConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.cache.KafkaCache; @@ -52,7 +53,6 @@ import 
io.aklivity.zilla.runtime.binding.kafka.internal.cache.KafkaCacheTopic; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; -import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaTopicConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.ArrayFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientDescribeFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientDescribeFactory.java index eb45c1d6b4..d88079723f 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientDescribeFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientDescribeFactory.java @@ -35,11 +35,11 @@ import org.agrona.collections.LongLongConsumer; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; -import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaSaslConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.String16FW; diff --git 
a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFetchFactory.java index 31a240f66c..ac8f606a3f 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFetchFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFetchFactory.java @@ -31,11 +31,11 @@ import org.agrona.collections.LongLongConsumer; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; -import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaSaslConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaHeaderFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaIsolation; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index 0a3867f0cc..d7d5265332 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -35,11 +35,11 @@ import org.agrona.collections.Object2ObjectHashMap; import org.agrona.concurrent.UnsafeBuffer; 
+import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; -import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaSaslConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.String16FW; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientMetaFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientMetaFactory.java index b572d3a30d..7e21af2c0b 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientMetaFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientMetaFactory.java @@ -31,11 +31,11 @@ import org.agrona.collections.LongLongConsumer; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; -import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaSaslConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.String16FW; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java index d312acf267..c4782b128e 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java @@ -38,11 +38,11 @@ import org.agrona.collections.LongLongConsumer; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; -import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaSaslConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaAckMode; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientSaslHandshaker.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientSaslHandshaker.java index 13337e1505..54de465f9f 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientSaslHandshaker.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientSaslHandshaker.java @@ -33,9 +33,9 @@ import 
org.agrona.collections.LongLongConsumer; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; -import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaSaslConfig; -import io.aklivity.zilla.runtime.binding.kafka.internal.config.ScramMechanism; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaScramMechanism; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.RequestHeaderFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.ResponseHeaderFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.sasl.SaslAuthenticateRequestFW; @@ -90,7 +90,7 @@ public abstract class KafkaClientSaslHandshaker private MessageDigest messageDigest; private Mac mac; private Supplier nonceSupplier; - private ScramMechanism mechanism; + private KafkaScramMechanism mechanism; private Matcher serverResponseMatcher; private byte[] result, ui, prev; @@ -321,7 +321,7 @@ private void doEncodeSaslScramFinalAuthenticateRequest( long traceId, long budgetId) { - mechanism = ScramMechanism.forMechanismName(sasl.mechanism.toUpperCase()); + mechanism = KafkaScramMechanism.forMechanismName(sasl.mechanism.toUpperCase()); try { messageDigest = MessageDigest.getInstance(mechanism.hashAlgorithm()); diff --git a/runtime/binding-kafka/src/main/moditect/module-info.java b/runtime/binding-kafka/src/main/moditect/module-info.java index 5095971748..1487416b9f 100644 --- a/runtime/binding-kafka/src/main/moditect/module-info.java +++ b/runtime/binding-kafka/src/main/moditect/module-info.java @@ -17,6 +17,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.kafka.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBindingFactorySpi; diff --git 
a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapterTest.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapterTest.java index d28a654c15..c1432c0bb3 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapterTest.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaConditionConfigAdapterTest.java @@ -27,6 +27,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaConditionConfig; + public class KafkaConditionConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfigAdapterTest.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfigAdapterTest.java index 9cbcccbf85..34bf02c8bf 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfigAdapterTest.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaOptionsConfigAdapterTest.java @@ -30,6 +30,10 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaOptionsConfig; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaTopicConfig; + public class KafkaOptionsConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyAddressConfig.java b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/config/ProxyAddressConfig.java similarity index 93% rename from 
runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyAddressConfig.java rename to runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/config/ProxyAddressConfig.java index a872d1c8f6..59fcb38533 100644 --- a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyAddressConfig.java +++ b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/config/ProxyAddressConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.binding.proxy.internal.config; +package io.aklivity.zilla.runtime.binding.proxy.config; public class ProxyAddressConfig { diff --git a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionConfig.java b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/config/ProxyConditionConfig.java similarity index 95% rename from runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionConfig.java rename to runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/config/ProxyConditionConfig.java index e44b5b1efc..8bf709f4aa 100644 --- a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionConfig.java +++ b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/config/ProxyConditionConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.proxy.internal.config; +package io.aklivity.zilla.runtime.binding.proxy.config; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyInfoConfig.java b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/config/ProxyInfoConfig.java similarity index 89% rename from runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyInfoConfig.java rename to runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/config/ProxyInfoConfig.java index a39d538989..e793b22577 100644 --- a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyInfoConfig.java +++ b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/config/ProxyInfoConfig.java @@ -13,7 +13,9 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.proxy.internal.config; +package io.aklivity.zilla.runtime.binding.proxy.config; + +import io.aklivity.zilla.runtime.binding.proxy.internal.config.ProxySecureInfoConfig; public class ProxyInfoConfig { diff --git a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyOptionsConfig.java b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/config/ProxyOptionsConfig.java similarity index 92% rename from runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyOptionsConfig.java rename to runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/config/ProxyOptionsConfig.java index 44b9e06477..7ad2317a50 100644 --- a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyOptionsConfig.java +++ b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/config/ProxyOptionsConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.proxy.internal.config; +package io.aklivity.zilla.runtime.binding.proxy.config; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; diff --git a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyAddressConfigAdapter.java b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyAddressConfigAdapter.java index e001175509..7f35dfa85e 100644 --- a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyAddressConfigAdapter.java +++ b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyAddressConfigAdapter.java @@ -20,6 +20,8 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyAddressConfig; + public final class ProxyAddressConfigAdapter implements JsonbAdapter { private static final String HOST_NAME = "host"; diff --git a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyBindingConfig.java b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyBindingConfig.java index 81797477ef..247e13fbb7 100644 --- a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyBindingConfig.java +++ b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyBindingConfig.java @@ -19,6 +19,7 @@ import java.util.List; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyOptionsConfig; import io.aklivity.zilla.runtime.binding.proxy.internal.types.stream.ProxyBeginExFW; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; diff --git 
a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionConfigAdapter.java b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionConfigAdapter.java index 4cd6486322..7e561d3df4 100644 --- a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionConfigAdapter.java +++ b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionConfigAdapter.java @@ -20,6 +20,9 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyAddressConfig; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyConditionConfig; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyInfoConfig; import io.aklivity.zilla.runtime.binding.proxy.internal.ProxyBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; diff --git a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionMatcher.java b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionMatcher.java index a6d3aaeb49..cc62d879e3 100644 --- a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionMatcher.java +++ b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionMatcher.java @@ -46,6 +46,9 @@ import org.agrona.collections.MutableInteger; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyAddressConfig; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyConditionConfig; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyInfoConfig; import 
io.aklivity.zilla.runtime.binding.proxy.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.proxy.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.proxy.internal.types.ProxyAddressFW; diff --git a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyInfoConfigAdapter.java b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyInfoConfigAdapter.java index 54efa1b3c7..574b7ef129 100644 --- a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyInfoConfigAdapter.java +++ b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyInfoConfigAdapter.java @@ -23,6 +23,8 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyInfoConfig; + public final class ProxyInfoConfigAdapter implements JsonbAdapter { private static final String ALPN_NAME = "alpn"; diff --git a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyOptionsConfigAdapter.java b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyOptionsConfigAdapter.java index c854084c23..4268f92a72 100644 --- a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyOptionsConfigAdapter.java +++ b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyOptionsConfigAdapter.java @@ -20,6 +20,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyOptionsConfig; import io.aklivity.zilla.runtime.binding.proxy.internal.ProxyBinding; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; diff 
--git a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyRouteConfig.java b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyRouteConfig.java index 26e51ea4f6..0c8a57f1be 100644 --- a/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyRouteConfig.java +++ b/runtime/binding-proxy/src/main/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyRouteConfig.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.function.LongPredicate; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyConditionConfig; import io.aklivity.zilla.runtime.binding.proxy.internal.types.stream.ProxyBeginExFW; import io.aklivity.zilla.runtime.engine.config.RouteConfig; diff --git a/runtime/binding-proxy/src/main/moditect/module-info.java b/runtime/binding-proxy/src/main/moditect/module-info.java index 2864f16717..dddbb108e0 100644 --- a/runtime/binding-proxy/src/main/moditect/module-info.java +++ b/runtime/binding-proxy/src/main/moditect/module-info.java @@ -17,6 +17,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.proxy.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.proxy.internal.ProxyBindingFactorySpi; diff --git a/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionConfigAdapterTest.java b/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionConfigAdapterTest.java index e4fa86d343..999d1a8ec3 100644 --- a/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionConfigAdapterTest.java +++ b/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyConditionConfigAdapterTest.java @@ -28,6 +28,10 @@ import 
org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyAddressConfig; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyConditionConfig; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyInfoConfig; + public class ProxyConditionConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyMatcherTest.java b/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyMatcherTest.java index bcdd75130d..48757416a4 100644 --- a/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyMatcherTest.java +++ b/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyMatcherTest.java @@ -29,6 +29,9 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyAddressConfig; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyConditionConfig; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyInfoConfig; import io.aklivity.zilla.runtime.binding.proxy.internal.types.stream.ProxyBeginExFW; public class ProxyMatcherTest diff --git a/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyOptionsConfigAdapterTest.java b/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyOptionsConfigAdapterTest.java index e17f917e07..8dc2e8a69e 100644 --- a/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyOptionsConfigAdapterTest.java +++ b/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/config/ProxyOptionsConfigAdapterTest.java @@ -27,6 +27,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.proxy.config.ProxyOptionsConfig; + public class 
ProxyOptionsConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionConfig.java b/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/config/SseKafkaConditionConfig.java similarity index 92% rename from runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionConfig.java rename to runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/config/SseKafkaConditionConfig.java index 548047cf50..8681ab6ab0 100644 --- a/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionConfig.java +++ b/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/config/SseKafkaConditionConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.binding.sse.kafka.internal.config; +package io.aklivity.zilla.runtime.binding.sse.kafka.config; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionConfigAdapter.java b/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionConfigAdapter.java index e8ea4138c6..c1430063f8 100644 --- a/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionConfigAdapter.java +++ b/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionConfigAdapter.java @@ -19,6 +19,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.sse.kafka.config.SseKafkaConditionConfig; import io.aklivity.zilla.runtime.binding.sse.kafka.internal.SseKafkaBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; diff --git a/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionMatcher.java b/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionMatcher.java index 4a2cb2d462..a39f06093f 100644 --- a/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionMatcher.java +++ b/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionMatcher.java @@ -18,6 +18,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.aklivity.zilla.runtime.binding.sse.kafka.config.SseKafkaConditionConfig; + public final class 
SseKafkaConditionMatcher { private final Matcher path; diff --git a/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaRouteConfig.java b/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaRouteConfig.java index 0d626883bc..b28a8b938a 100644 --- a/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaRouteConfig.java +++ b/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaRouteConfig.java @@ -25,6 +25,7 @@ import java.util.regex.MatchResult; import java.util.stream.Collectors; +import io.aklivity.zilla.runtime.binding.sse.kafka.config.SseKafkaConditionConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; import io.aklivity.zilla.runtime.engine.util.function.LongObjectBiFunction; diff --git a/runtime/binding-sse-kafka/src/main/moditect/module-info.java b/runtime/binding-sse-kafka/src/main/moditect/module-info.java index 0ae98e756f..abfc9307e6 100644 --- a/runtime/binding-sse-kafka/src/main/moditect/module-info.java +++ b/runtime/binding-sse-kafka/src/main/moditect/module-info.java @@ -16,6 +16,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.sse.kafka.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.sse.kafka.internal.SseKafkaBindingFactorySpi; diff --git a/runtime/binding-sse-kafka/src/test/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionConfigAdapterTest.java b/runtime/binding-sse-kafka/src/test/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionConfigAdapterTest.java index 17bb8a46df..98e7308c57 100644 --- 
a/runtime/binding-sse-kafka/src/test/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionConfigAdapterTest.java +++ b/runtime/binding-sse-kafka/src/test/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaConditionConfigAdapterTest.java @@ -26,6 +26,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.sse.kafka.config.SseKafkaConditionConfig; + public class SseKafkaConditionConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionConfig.java b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/config/SseConditionConfig.java similarity index 93% rename from runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionConfig.java rename to runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/config/SseConditionConfig.java index 4a6b0523d2..0d6a8750d1 100644 --- a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionConfig.java +++ b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/config/SseConditionConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.sse.internal.config; +package io.aklivity.zilla.runtime.binding.sse.config; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseOptionsConfig.java b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/config/SseOptionsConfig.java similarity index 80% rename from runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseOptionsConfig.java rename to runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/config/SseOptionsConfig.java index 17943e4217..d8bd6baff8 100644 --- a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseOptionsConfig.java +++ b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/config/SseOptionsConfig.java @@ -13,20 +13,13 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.sse.internal.config; +package io.aklivity.zilla.runtime.binding.sse.config; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; public final class SseOptionsConfig extends OptionsConfig { - static final int RETRY_DEFAULT = 2000; - - public int retry; - - public SseOptionsConfig() - { - this(RETRY_DEFAULT); - } + public final int retry; public SseOptionsConfig( int retry) diff --git a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseBindingConfig.java b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseBindingConfig.java index ae9e1cefa5..048347fd43 100644 --- a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseBindingConfig.java +++ b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseBindingConfig.java @@ -15,16 +15,18 @@ */ package io.aklivity.zilla.runtime.binding.sse.internal.config; +import static io.aklivity.zilla.runtime.binding.sse.internal.config.SseOptionsConfigAdapter.RETRY_DEFAULT; import static java.util.stream.Collectors.toList; import java.util.List; +import io.aklivity.zilla.runtime.binding.sse.config.SseOptionsConfig; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; public final class SseBindingConfig { - private static final SseOptionsConfig DEFAULT_OPTIONS = new SseOptionsConfig(); + private static final SseOptionsConfig DEFAULT_OPTIONS = new SseOptionsConfig(RETRY_DEFAULT); public final long id; public final String name; diff --git a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionConfigAdapter.java b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionConfigAdapter.java index 4d9da66ff4..e57ddd58d5 100644 --- 
a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionConfigAdapter.java +++ b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionConfigAdapter.java @@ -20,6 +20,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.sse.config.SseConditionConfig; import io.aklivity.zilla.runtime.binding.sse.internal.SseBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; diff --git a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionMatcher.java b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionMatcher.java index 03bf8e9f4b..4824a1cf79 100644 --- a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionMatcher.java +++ b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionMatcher.java @@ -18,6 +18,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.aklivity.zilla.runtime.binding.sse.config.SseConditionConfig; + public final class SseConditionMatcher { private final Matcher path; diff --git a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseOptionsConfigAdapter.java b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseOptionsConfigAdapter.java index 40848ff165..733bb75f7c 100644 --- a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseOptionsConfigAdapter.java +++ b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseOptionsConfigAdapter.java @@ -20,6 +20,7 @@ import jakarta.json.JsonObjectBuilder; import 
jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.sse.config.SseOptionsConfig; import io.aklivity.zilla.runtime.binding.sse.internal.SseBinding; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; @@ -27,6 +28,7 @@ public final class SseOptionsConfigAdapter implements OptionsConfigAdapterSpi, JsonbAdapter { private static final String RETRY_NAME = "retry"; + public static final int RETRY_DEFAULT = 2000; @Override public Kind kind() @@ -48,7 +50,7 @@ public JsonObject adaptToJson( JsonObjectBuilder object = Json.createObjectBuilder(); - if (sseOptions.retry != SseOptionsConfig.RETRY_DEFAULT) + if (sseOptions.retry != SseOptionsConfigAdapter.RETRY_DEFAULT) { object.add(RETRY_NAME, sseOptions.retry); } @@ -62,7 +64,7 @@ public OptionsConfig adaptFromJson( { int retry = object.containsKey(RETRY_NAME) ? object.getInt(RETRY_NAME) - : SseOptionsConfig.RETRY_DEFAULT; + : SseOptionsConfigAdapter.RETRY_DEFAULT; return new SseOptionsConfig(retry); } diff --git a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseRouteConfig.java b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseRouteConfig.java index b0c9ce3aea..1a0f3388e8 100644 --- a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseRouteConfig.java +++ b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseRouteConfig.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.function.LongPredicate; +import io.aklivity.zilla.runtime.binding.sse.config.SseConditionConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; diff --git a/runtime/binding-sse/src/main/moditect/module-info.java b/runtime/binding-sse/src/main/moditect/module-info.java index 52b40d4046..813f34dfde 100644 
--- a/runtime/binding-sse/src/main/moditect/module-info.java +++ b/runtime/binding-sse/src/main/moditect/module-info.java @@ -17,6 +17,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.sse.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.sse.internal.SseBindingFactorySpi; diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionConfigAdapterTest.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionConfigAdapterTest.java index 8f0596ec77..8fe4d14ea4 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionConfigAdapterTest.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseConditionConfigAdapterTest.java @@ -27,6 +27,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.sse.config.SseConditionConfig; + public class SseConditionConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseOptionsConfigAdapterTest.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseOptionsConfigAdapterTest.java index 3871942aa4..77c1e353a9 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseOptionsConfigAdapterTest.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseOptionsConfigAdapterTest.java @@ -27,6 +27,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.sse.config.SseOptionsConfig; + public class SseOptionsConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-tcp/NOTICE b/runtime/binding-tcp/NOTICE index 6deac3d29c..08323b88fb 
100644 --- a/runtime/binding-tcp/NOTICE +++ b/runtime/binding-tcp/NOTICE @@ -12,12 +12,6 @@ specific language governing permissions and limitations under the License. This project includes: - agrona under The Apache License, Version 2.0 - ICU4J under Unicode/ICU License - Jakarta JSON Processing API under Eclipse Public License 2.0 or GNU General Public License, version 2 with the GNU Classpath Exception - JSON-B API under Eclipse Public License 2.0 or GNU General Public License, version 2 with the GNU Classpath Exception - org.leadpony.justify under The Apache Software License, Version 2.0 - zilla::runtime::engine under The Apache Software License, Version 2.0 This project also includes code under copyright of the following entities: diff --git a/runtime/binding-tcp/pom.xml b/runtime/binding-tcp/pom.xml index 64d50cd0d8..7df01d00f5 100644 --- a/runtime/binding-tcp/pom.xml +++ b/runtime/binding-tcp/pom.xml @@ -41,6 +41,7 @@ ${project.groupId} engine ${project.version} + true ${project.groupId} diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfig.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfig.java similarity index 94% rename from runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfig.java rename to runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfig.java index 5c205199e4..8b765aeb11 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfig.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.tcp.internal.config; +package io.aklivity.zilla.runtime.binding.tcp.config; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfig.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfig.java similarity index 95% rename from runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfig.java rename to runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfig.java index 78ff5214e3..97cd0e7e44 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfig.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.tcp.internal.config; +package io.aklivity.zilla.runtime.binding.tcp.config; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpBindingConfig.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpBindingConfig.java index 3b848aba6c..e88948dd43 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpBindingConfig.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpBindingConfig.java @@ -21,6 +21,7 @@ import java.net.InetSocketAddress; import java.util.List; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfig; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapter.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapter.java index 21778d265c..d53476fc8f 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapter.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapter.java @@ -28,6 +28,7 @@ import org.agrona.collections.IntHashSet; import org.agrona.collections.MutableInteger; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpConditionConfig; import io.aklivity.zilla.runtime.binding.tcp.internal.TcpBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; diff --git 
a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionMatcher.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionMatcher.java index b1e23fb52a..f286b41635 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionMatcher.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionMatcher.java @@ -23,6 +23,7 @@ import org.agrona.collections.IntHashSet; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpConditionConfig; import io.aklivity.zilla.runtime.binding.tcp.internal.util.Cidr; public final class TcpConditionMatcher diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapter.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapter.java index 128e004d5a..b6dd8ed617 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapter.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapter.java @@ -30,6 +30,7 @@ import org.agrona.collections.IntHashSet; import org.agrona.collections.MutableInteger; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfig; import io.aklivity.zilla.runtime.binding.tcp.internal.TcpBinding; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpRouteConfig.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpRouteConfig.java index ecb17f4b97..2a97bea824 100644 --- 
a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpRouteConfig.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpRouteConfig.java @@ -22,6 +22,7 @@ import java.util.function.LongPredicate; import java.util.function.Predicate; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpConditionConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; public final class TcpRouteConfig diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpServerBindingConfig.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpServerBindingConfig.java index 104520b41e..5cb033a872 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpServerBindingConfig.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpServerBindingConfig.java @@ -29,6 +29,8 @@ import org.agrona.LangUtil; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfig; + public final class TcpServerBindingConfig { public final long id; diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpClientFactory.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpClientFactory.java index 52841d2f4f..2e2bb6c244 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpClientFactory.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpClientFactory.java @@ -42,9 +42,9 @@ import org.agrona.MutableDirectBuffer; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfig; import io.aklivity.zilla.runtime.binding.tcp.internal.TcpConfiguration; import 
io.aklivity.zilla.runtime.binding.tcp.internal.config.TcpBindingConfig; -import io.aklivity.zilla.runtime.binding.tcp.internal.config.TcpOptionsConfig; import io.aklivity.zilla.runtime.binding.tcp.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.tcp.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.tcp.internal.types.stream.AbortFW; diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpClientRouter.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpClientRouter.java index 70f0c5013e..9f67193d7c 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpClientRouter.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpClientRouter.java @@ -29,8 +29,8 @@ import org.agrona.collections.Long2ObjectHashMap; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfig; import io.aklivity.zilla.runtime.binding.tcp.internal.config.TcpBindingConfig; -import io.aklivity.zilla.runtime.binding.tcp.internal.config.TcpOptionsConfig; import io.aklivity.zilla.runtime.binding.tcp.internal.config.TcpRouteConfig; import io.aklivity.zilla.runtime.binding.tcp.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.tcp.internal.types.OctetsFW; diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpServerFactory.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpServerFactory.java index 06d151981a..384e79d496 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpServerFactory.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpServerFactory.java @@ -43,9 +43,9 @@ import org.agrona.MutableDirectBuffer; import org.agrona.concurrent.UnsafeBuffer; +import 
io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfig; import io.aklivity.zilla.runtime.binding.tcp.internal.TcpConfiguration; import io.aklivity.zilla.runtime.binding.tcp.internal.config.TcpBindingConfig; -import io.aklivity.zilla.runtime.binding.tcp.internal.config.TcpOptionsConfig; import io.aklivity.zilla.runtime.binding.tcp.internal.config.TcpRouteConfig; import io.aklivity.zilla.runtime.binding.tcp.internal.config.TcpServerBindingConfig; import io.aklivity.zilla.runtime.binding.tcp.internal.types.Flyweight; diff --git a/runtime/binding-tcp/src/main/moditect/module-info.java b/runtime/binding-tcp/src/main/moditect/module-info.java index 12014cc672..103492b28c 100644 --- a/runtime/binding-tcp/src/main/moditect/module-info.java +++ b/runtime/binding-tcp/src/main/moditect/module-info.java @@ -17,6 +17,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.tcp.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.tcp.internal.TcpBindingFactorySpi; diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapterTest.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapterTest.java index 5ae0a6dc86..25d3ac237e 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapterTest.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapterTest.java @@ -27,6 +27,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpConditionConfig; + public class TcpConditionConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapterTest.java 
b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapterTest.java index c2038ceaf0..33f9c16035 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapterTest.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapterTest.java @@ -27,6 +27,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfig; + public class TcpOptionsConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfig.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfig.java similarity index 93% rename from runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfig.java rename to runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfig.java index ef4e33a446..79694818f5 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfig.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.tls.internal.config; +package io.aklivity.zilla.runtime.binding.tls.config; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsMutual.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsMutual.java similarity index 91% rename from runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsMutual.java rename to runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsMutual.java index 63443d3612..94d623fac1 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsMutual.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsMutual.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.binding.tls.internal.config; +package io.aklivity.zilla.runtime.binding.tls.config; public enum TlsMutual { diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfig.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfig.java similarity index 96% rename from runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfig.java rename to runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfig.java index 3d9f520a6e..eeff36b470 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfig.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.tls.internal.config; +package io.aklivity.zilla.runtime.binding.tls.config; import java.util.List; diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsBindingConfig.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsBindingConfig.java index f31e3fdef0..19e59b54b4 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsBindingConfig.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsBindingConfig.java @@ -49,6 +49,8 @@ import org.agrona.LangUtil; +import io.aklivity.zilla.runtime.binding.tls.config.TlsMutual; +import io.aklivity.zilla.runtime.binding.tls.config.TlsOptionsConfig; import io.aklivity.zilla.runtime.binding.tls.internal.TlsConfiguration; import io.aklivity.zilla.runtime.binding.tls.internal.identity.TlsClientX509ExtendedKeyManager; import io.aklivity.zilla.runtime.binding.tls.internal.types.Array32FW; diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapter.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapter.java index 45632931c6..473a5736f4 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapter.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapter.java @@ -20,6 +20,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.tls.config.TlsConditionConfig; import io.aklivity.zilla.runtime.binding.tls.internal.TlsBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; diff --git 
a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionMatcher.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionMatcher.java index 2cd8bcb745..ba17acb12c 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionMatcher.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionMatcher.java @@ -18,6 +18,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.aklivity.zilla.runtime.binding.tls.config.TlsConditionConfig; + public final class TlsConditionMatcher { public final Matcher authorityMatch; diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapter.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapter.java index 54d968dfc0..fb869c2be7 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapter.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapter.java @@ -15,7 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.tls.internal.config; -import static io.aklivity.zilla.runtime.binding.tls.internal.config.TlsMutual.REQUIRED; +import static io.aklivity.zilla.runtime.binding.tls.config.TlsMutual.REQUIRED; import static java.util.stream.Collectors.toList; import java.util.List; @@ -29,6 +29,8 @@ import jakarta.json.JsonValue; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.tls.config.TlsMutual; +import io.aklivity.zilla.runtime.binding.tls.config.TlsOptionsConfig; import io.aklivity.zilla.runtime.binding.tls.internal.TlsBinding; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import 
io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsRouteConfig.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsRouteConfig.java index 6893b4dbce..1dd7a45e76 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsRouteConfig.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsRouteConfig.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.function.LongPredicate; +import io.aklivity.zilla.runtime.binding.tls.config.TlsConditionConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; diff --git a/runtime/binding-tls/src/main/moditect/module-info.java b/runtime/binding-tls/src/main/moditect/module-info.java index 13b4b0648d..194599b35c 100644 --- a/runtime/binding-tls/src/main/moditect/module-info.java +++ b/runtime/binding-tls/src/main/moditect/module-info.java @@ -19,6 +19,8 @@ requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.tls.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.tls.internal.TlsBindingFactorySpi; diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapterTest.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapterTest.java index 1f9b8963d6..02555c813b 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapterTest.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapterTest.java @@ -27,6 +27,8 @@ import org.junit.Before; import 
org.junit.Test; +import io.aklivity.zilla.runtime.binding.tls.config.TlsConditionConfig; + public class TlsConditionConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapterTest.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapterTest.java index 5fba85502e..256ec74c47 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapterTest.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapterTest.java @@ -15,7 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.tls.internal.config; -import static io.aklivity.zilla.runtime.binding.tls.internal.config.TlsMutual.REQUESTED; +import static io.aklivity.zilla.runtime.binding.tls.config.TlsMutual.REQUESTED; import static java.util.Arrays.asList; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; @@ -29,6 +29,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.tls.config.TlsOptionsConfig; + public class TlsOptionsConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionConfig.java b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/config/WsConditionConfig.java similarity index 94% rename from runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionConfig.java rename to runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/config/WsConditionConfig.java index 5b56f05579..c312415b84 100644 --- a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionConfig.java +++ 
b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/config/WsConditionConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.binding.ws.internal.config; +package io.aklivity.zilla.runtime.binding.ws.config; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; diff --git a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsOptionsConfig.java b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/config/WsOptionsConfig.java similarity index 94% rename from runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsOptionsConfig.java rename to runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/config/WsOptionsConfig.java index b6b9e4f4a7..3a20767422 100644 --- a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsOptionsConfig.java +++ b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/config/WsOptionsConfig.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.ws.internal.config; +package io.aklivity.zilla.runtime.binding.ws.config; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; diff --git a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsBindingConfig.java b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsBindingConfig.java index 36a5005258..7dbb3982b8 100644 --- a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsBindingConfig.java +++ b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsBindingConfig.java @@ -19,6 +19,7 @@ import java.util.List; +import io.aklivity.zilla.runtime.binding.ws.config.WsOptionsConfig; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; diff --git a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionConfigAdapter.java b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionConfigAdapter.java index febc29cb17..1b7946e73e 100644 --- a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionConfigAdapter.java +++ b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionConfigAdapter.java @@ -20,6 +20,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.ws.config.WsConditionConfig; import io.aklivity.zilla.runtime.binding.ws.internal.WsBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; diff --git a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionMatcher.java 
b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionMatcher.java index a37a8a7f42..62b4439f2a 100644 --- a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionMatcher.java +++ b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionMatcher.java @@ -18,6 +18,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.aklivity.zilla.runtime.binding.ws.config.WsConditionConfig; + public final class WsConditionMatcher { private final Matcher protocol; diff --git a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsOptionsConfigAdapter.java b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsOptionsConfigAdapter.java index c89a0bcae7..2732f5c28b 100644 --- a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsOptionsConfigAdapter.java +++ b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsOptionsConfigAdapter.java @@ -20,6 +20,7 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.binding.ws.config.WsOptionsConfig; import io.aklivity.zilla.runtime.binding.ws.internal.WsBinding; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; diff --git a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsRouteConfig.java b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsRouteConfig.java index 2fc17f7ceb..5515051730 100644 --- a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsRouteConfig.java +++ b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsRouteConfig.java @@ -20,6 +20,7 @@ 
import java.util.List; import java.util.function.LongPredicate; +import io.aklivity.zilla.runtime.binding.ws.config.WsConditionConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; diff --git a/runtime/binding-ws/src/main/moditect/module-info.java b/runtime/binding-ws/src/main/moditect/module-info.java index fe4c546064..9ac95ddcde 100644 --- a/runtime/binding-ws/src/main/moditect/module-info.java +++ b/runtime/binding-ws/src/main/moditect/module-info.java @@ -17,6 +17,8 @@ { requires io.aklivity.zilla.runtime.engine; + exports io.aklivity.zilla.runtime.binding.ws.config; + provides io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi with io.aklivity.zilla.runtime.binding.ws.internal.WsBindingFactorySpi; diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionConfigAdapterTest.java b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionConfigAdapterTest.java index 352300272e..a9f3607b42 100644 --- a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionConfigAdapterTest.java +++ b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsConditionConfigAdapterTest.java @@ -27,6 +27,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.ws.config.WsConditionConfig; + public class WsConditionConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsOptionsConfigAdapterTest.java b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsOptionsConfigAdapterTest.java index 07d2c18044..2eeb6c7053 100644 --- a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsOptionsConfigAdapterTest.java +++ 
b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsOptionsConfigAdapterTest.java @@ -27,6 +27,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.binding.ws.config.WsOptionsConfig; + public class WsOptionsConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/engine/NOTICE b/runtime/engine/NOTICE index 026266af23..212f9654aa 100644 --- a/runtime/engine/NOTICE +++ b/runtime/engine/NOTICE @@ -14,9 +14,14 @@ under the License. This project includes: agrona under The Apache License, Version 2.0 ICU4J under Unicode/ICU License + Jackson-annotations under The Apache Software License, Version 2.0 + Jackson-core under The Apache Software License, Version 2.0 + jackson-databind under The Apache Software License, Version 2.0 + Jackson-dataformat-YAML under The Apache Software License, Version 2.0 Jakarta JSON Processing API under Eclipse Public License 2.0 or GNU General Public License, version 2 with the GNU Classpath Exception JSON-B API under Eclipse Public License 2.0 or GNU General Public License, version 2 with the GNU Classpath Exception org.leadpony.justify under The Apache Software License, Version 2.0 + SnakeYAML under Apache License, Version 2.0 This project also includes code under copyright of the following entities: diff --git a/runtime/engine/pom.xml b/runtime/engine/pom.xml index b3675dad5d..4b7ca88b6f 100644 --- a/runtime/engine/pom.xml +++ b/runtime/engine/pom.xml @@ -69,6 +69,11 @@ + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + 2.15.2 + org.jmock jmock-junit4 diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigException.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigException.java new file mode 100644 index 0000000000..c22bed5226 --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigException.java @@ -0,0 +1,27 @@ +/* + * Copyright 2021-2023 
Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.config; + +public final class ConfigException extends RuntimeException +{ + private static final long serialVersionUID = 1L; + + public ConfigException( + String message) + { + super(message); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigReader.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigReader.java new file mode 100644 index 0000000000..6cd6708943 --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigReader.java @@ -0,0 +1,150 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.config; + +import static jakarta.json.stream.JsonGenerator.PRETTY_PRINTING; +import static java.util.Collections.singletonMap; +import static org.agrona.LangUtil.rethrowUnchecked; + +import java.io.InputStream; +import java.io.Reader; +import java.io.StringReader; +import java.io.StringWriter; +import java.net.URL; +import java.util.Collection; +import java.util.LinkedList; +import java.util.List; +import java.util.function.Consumer; + +import jakarta.json.JsonArray; +import jakarta.json.JsonObject; +import jakarta.json.JsonPatch; +import jakarta.json.JsonReader; +import jakarta.json.bind.Jsonb; +import jakarta.json.bind.JsonbBuilder; +import jakarta.json.bind.JsonbConfig; +import jakarta.json.spi.JsonProvider; +import jakarta.json.stream.JsonParser; + +import org.leadpony.justify.api.JsonSchema; +import org.leadpony.justify.api.JsonSchemaReader; +import org.leadpony.justify.api.JsonValidationService; +import org.leadpony.justify.api.ProblemHandler; + +import io.aklivity.zilla.runtime.engine.Engine; +import io.aklivity.zilla.runtime.engine.internal.config.NamespaceAdapter; +import io.aklivity.zilla.runtime.engine.internal.config.schema.UniquePropertyKeysSchema; + +public final class ConfigReader +{ + private final ConfigAdapterContext context; + private final Collection schemaTypes; + private final Consumer logger; + + public ConfigReader( + ConfigAdapterContext context, + Collection schemaTypes, + Consumer logger) + { + this.context = context; + this.schemaTypes = schemaTypes; + this.logger = logger; + } + + public NamespaceConfig read( + Reader reader) + { + NamespaceConfig namespace = null; + + List errors = new LinkedList<>(); + + read: + try + { + InputStream schemaInput = Engine.class.getResourceAsStream("internal/schema/engine.schema.json"); + + JsonProvider schemaProvider = JsonProvider.provider(); + JsonReader schemaReader = schemaProvider.createReader(schemaInput); + JsonObject schemaObject = 
schemaReader.readObject(); + + for (URL schemaType : schemaTypes) + { + InputStream schemaPatchInput = schemaType.openStream(); + JsonReader schemaPatchReader = schemaProvider.createReader(schemaPatchInput); + JsonArray schemaPatchArray = schemaPatchReader.readArray(); + JsonPatch schemaPatch = schemaProvider.createPatch(schemaPatchArray); + + schemaObject = schemaPatch.apply(schemaObject); + } + + if (logger != null) + { + final StringWriter out = new StringWriter(); + schemaProvider.createGeneratorFactory(singletonMap(PRETTY_PRINTING, true)) + .createGenerator(out) + .write(schemaObject) + .close(); + + final String schemaText = out.getBuffer().toString(); + logger.accept(schemaText); + } + + JsonParser schemaParser = schemaProvider.createParserFactory(null) + .createParser(new StringReader(schemaObject.toString())); + + JsonValidationService service = JsonValidationService.newInstance(); + ProblemHandler handler = service.createProblemPrinter(msg -> errors.add(new ConfigException(msg))); + JsonSchemaReader validator = service.createSchemaReader(schemaParser); + JsonSchema schema = new UniquePropertyKeysSchema(validator.read()); + + JsonProvider provider = service.createJsonProvider(schema, parser -> handler); + //provider.createReader(reader).read(); + + if (!errors.isEmpty()) + { + break read; + } + + JsonbConfig config = new JsonbConfig() + .withAdapters(new NamespaceAdapter(context)); + Jsonb jsonb = JsonbBuilder.newBuilder() + .withProvider(provider) + .withConfig(config) + .build(); + + reader.reset(); + namespace = jsonb.fromJson(reader, NamespaceConfig.class); + + if (!errors.isEmpty()) + { + break read; + } + } + catch (Exception ex) + { + errors.add(ex); + } + + if (!errors.isEmpty()) + { + Exception ex = errors.remove(0); + errors.forEach(ex::addSuppressed); + rethrowUnchecked(ex); + } + + return namespace; + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigWriter.java 
b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigWriter.java new file mode 100644 index 0000000000..ccce543404 --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigWriter.java @@ -0,0 +1,102 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.config; + +import static org.agrona.LangUtil.rethrowUnchecked; + +import java.io.StringWriter; +import java.io.Writer; +import java.util.LinkedList; +import java.util.List; + +import jakarta.json.bind.Jsonb; +import jakarta.json.bind.JsonbBuilder; +import jakarta.json.bind.JsonbConfig; +import jakarta.json.spi.JsonProvider; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.yaml.YAMLMapper; + +import io.aklivity.zilla.runtime.engine.internal.config.NamespaceAdapter; + +public final class ConfigWriter +{ + private final ConfigAdapterContext context; + + public ConfigWriter( + ConfigAdapterContext context) + { + this.context = context; + } + + public void write( + NamespaceConfig namespace, + Writer writer) + { + write0(namespace, writer); + } + + public String write( + NamespaceConfig namespace) + { + StringWriter writer = new StringWriter(); + write0(namespace, writer); + return writer.toString(); + } + + private void write0( + NamespaceConfig 
namespace, + Writer writer) + { + List errors = new LinkedList<>(); + + write: + try + { + // TODO: YamlProvider (supporting YamlGenerator) + JsonProvider provider = JsonProvider.provider(); + + JsonbConfig config = new JsonbConfig() + .withAdapters(new NamespaceAdapter(context)) + .withFormatting(true); + Jsonb jsonb = JsonbBuilder.newBuilder() + .withProvider(provider) + .withConfig(config) + .build(); + + String jsonText = jsonb.toJson(namespace, NamespaceConfig.class); + JsonNode json = new ObjectMapper().readTree(jsonText); + new YAMLMapper().writeValue(writer, json); + + if (!errors.isEmpty()) + { + break write; + } + } + catch (Exception ex) + { + errors.add(ex); + } + + if (!errors.isEmpty()) + { + Exception ex = errors.remove(0); + errors.forEach(ex::addSuppressed); + rethrowUnchecked(ex); + } + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/KindAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/KindAdapter.java index 0b31a9bc91..c49078528f 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/KindAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/KindAdapter.java @@ -24,12 +24,9 @@ public class KindAdapter implements JsonbAdapter { - private final ConfigAdapterContext context; - public KindAdapter( ConfigAdapterContext context) { - this.context = context; } @Override diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamspaceRefAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamspaceRefAdapter.java index 62728ea756..46288f06e9 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamspaceRefAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamspaceRefAdapter.java @@ -36,12 +36,9 @@ public class NamspaceRefAdapter 
implements JsonbAdapter LINKS_DEFAULT = emptyMap(); - private final ConfigAdapterContext context; - public NamspaceRefAdapter( ConfigAdapterContext context) { - this.context = context; } @Override diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/RouteAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/RouteAdapter.java index aa09292b0c..c14ca66dc6 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/RouteAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/RouteAdapter.java @@ -43,15 +43,14 @@ public class RouteAdapter implements JsonbAdapter private static final String WITH_NAME = "with"; private static final String GUARDED_NAME = "guarded"; - private int index; private final ConditionAdapter condition; private final WithAdapter with; - private ConfigAdapterContext context; + + private int index; public RouteAdapter( ConfigAdapterContext context) { - this.context = context; condition = new ConditionAdapter(); with = new WithAdapter(); } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryRefAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryRefAdapter.java index 3ef1114f1b..d99e230712 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryRefAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryRefAdapter.java @@ -22,8 +22,6 @@ import jakarta.json.JsonArrayBuilder; import jakarta.json.JsonObject; import jakarta.json.JsonObjectBuilder; -import jakarta.json.JsonString; -import jakarta.json.JsonValue; import jakarta.json.bind.adapter.JsonbAdapter; import io.aklivity.zilla.runtime.engine.config.MetricRefConfig; @@ -62,10 +60,4 @@ public TelemetryRefConfig adaptFromJson( : List.of(); return new 
TelemetryRefConfig(metricRefs); } - - private static String asJsonString( - JsonValue value) - { - return ((JsonString) value).getString(); - } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/json/SchemaDecorator.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/schema/SchemaDecorator.java similarity index 98% rename from runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/json/SchemaDecorator.java rename to runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/schema/SchemaDecorator.java index 4328d7a9cf..820de7d69a 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/json/SchemaDecorator.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/schema/SchemaDecorator.java @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.engine.internal.registry.json; +package io.aklivity.zilla.runtime.engine.internal.config.schema; import java.net.URI; import java.util.stream.Stream; diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/json/UniquePropertyKeysSchema.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/schema/UniquePropertyKeysSchema.java similarity index 98% rename from runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/json/UniquePropertyKeysSchema.java rename to runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/schema/UniquePropertyKeysSchema.java index 783ff8156b..2be041d77b 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/json/UniquePropertyKeysSchema.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/schema/UniquePropertyKeysSchema.java @@ -13,7 +13,7 
@@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.engine.internal.registry.json; +package io.aklivity.zilla.runtime.engine.internal.config.schema; import java.util.Deque; import java.util.HashSet; diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/ConfigurationManager.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/ConfigurationManager.java index 81d79c0f14..b1214fe167 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/ConfigurationManager.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/ConfigurationManager.java @@ -15,16 +15,11 @@ */ package io.aklivity.zilla.runtime.engine.internal.registry; -import static jakarta.json.stream.JsonGenerator.PRETTY_PRINTING; -import static java.util.Collections.singletonMap; - -import java.io.InputStream; import java.io.StringReader; -import java.io.StringWriter; import java.net.URL; +import java.util.Arrays; import java.util.Collection; import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -37,25 +32,10 @@ import java.util.function.ToIntFunction; import java.util.regex.Pattern; -import jakarta.json.JsonArray; -import jakarta.json.JsonObject; -import jakarta.json.JsonPatch; -import jakarta.json.JsonReader; -import jakarta.json.bind.Jsonb; -import jakarta.json.bind.JsonbBuilder; -import jakarta.json.bind.JsonbConfig; -import jakarta.json.spi.JsonProvider; -import jakarta.json.stream.JsonParser; - -import org.leadpony.justify.api.JsonSchema; -import org.leadpony.justify.api.JsonSchemaReader; -import org.leadpony.justify.api.JsonValidationService; -import org.leadpony.justify.api.ProblemHandler; - -import io.aklivity.zilla.runtime.engine.Engine; import 
io.aklivity.zilla.runtime.engine.EngineConfiguration; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; +import io.aklivity.zilla.runtime.engine.config.ConfigReader; import io.aklivity.zilla.runtime.engine.config.GuardConfig; import io.aklivity.zilla.runtime.engine.config.GuardedConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; @@ -69,8 +49,6 @@ import io.aklivity.zilla.runtime.engine.ext.EngineExtSpi; import io.aklivity.zilla.runtime.engine.guard.Guard; import io.aklivity.zilla.runtime.engine.internal.Tuning; -import io.aklivity.zilla.runtime.engine.internal.config.NamespaceAdapter; -import io.aklivity.zilla.runtime.engine.internal.registry.json.UniquePropertyKeysSchema; import io.aklivity.zilla.runtime.engine.internal.stream.NamespacedId; public class ConfigurationManager @@ -134,71 +112,16 @@ public NamespaceConfig parse( configText = expressions.resolve(configText); } - List errors = new LinkedList<>(); - parse: try { - //TODO: detect configURLs and call handleConfigURL - InputStream schemaInput = Engine.class.getResourceAsStream("internal/schema/engine.schema.json"); - - JsonProvider schemaProvider = JsonProvider.provider(); - JsonReader schemaReader = schemaProvider.createReader(schemaInput); - JsonObject schemaObject = schemaReader.readObject(); - - for (URL schemaType : schemaTypes) - { - InputStream schemaPatchInput = schemaType.openStream(); - JsonReader schemaPatchReader = schemaProvider.createReader(schemaPatchInput); - JsonArray schemaPatchArray = schemaPatchReader.readArray(); - JsonPatch schemaPatch = schemaProvider.createPatch(schemaPatchArray); - - schemaObject = schemaPatch.apply(schemaObject); - } - - if (config.verboseSchema()) - { - final StringWriter out = new StringWriter(); - schemaProvider.createGeneratorFactory(singletonMap(PRETTY_PRINTING, true)) - .createGenerator(out) - .write(schemaObject) - .close(); - - final String schemaText = 
out.getBuffer().toString(); - logger.accept(schemaText); - } - - JsonParser schemaParser = schemaProvider.createParserFactory(null) - .createParser(new StringReader(schemaObject.toString())); - - JsonValidationService service = JsonValidationService.newInstance(); - ProblemHandler handler = service.createProblemPrinter(errors::add); - JsonSchemaReader reader = service.createSchemaReader(schemaParser); - JsonSchema schema = new UniquePropertyKeysSchema(reader.read()); - - JsonProvider provider = service.createJsonProvider(schema, parser -> handler); - provider.createReader(new StringReader(configText)).read(); - - if (!errors.isEmpty()) - { - break parse; - } - final Function namespaceReadURL = l -> readURL.apply(configURL, l); - JsonbConfig config = new JsonbConfig() - .withAdapters(new NamespaceAdapter(new NamespaceConfigAdapterContext(namespaceReadURL))); - Jsonb jsonb = JsonbBuilder.newBuilder() - .withProvider(provider) - .withConfig(config) - .build(); - - namespace = jsonb.fromJson(configText, NamespaceConfig.class); - - if (!errors.isEmpty()) - { - break parse; - } + ConfigReader reader = new ConfigReader( + new NamespaceConfigAdapterContext(namespaceReadURL), + schemaTypes, + config.verboseSchema() ? 
logger : null); + namespace = reader.read(new StringReader(configText)); namespace.id = supplyId.applyAsInt(namespace.name); namespace.readURL = namespaceReadURL; @@ -281,12 +204,11 @@ public NamespaceConfig parse( catch (Throwable ex) { logError(ex.getMessage()); + Arrays.stream(ex.getSuppressed()) + .map(Throwable::getMessage) + .forEach(logger); } - if (!errors.isEmpty()) - { - errors.forEach(this::logError); - } return namespace; } diff --git a/runtime/engine/src/main/moditect/module-info.java b/runtime/engine/src/main/moditect/module-info.java index a3f60b0c5a..489e9acb67 100644 --- a/runtime/engine/src/main/moditect/module-info.java +++ b/runtime/engine/src/main/moditect/module-info.java @@ -38,6 +38,7 @@ requires transitive jakarta.json.bind; requires transitive org.agrona.core; requires org.leadpony.justify; + requires com.fasterxml.jackson.dataformat.yaml; requires jdk.unsupported; requires java.net.http; diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java new file mode 100644 index 0000000000..30abafa078 --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java @@ -0,0 +1,58 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.config; + +import static java.util.Collections.emptyList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; +import org.mockito.quality.Strictness; + +public class ConfigWriterTest +{ + @Rule + public MockitoRule rule = MockitoJUnit.rule().strictness(Strictness.STRICT_STUBS); + + @Mock + private ConfigAdapterContext context; + + private ConfigWriter yaml; + + @Before + public void initYaml() + { + yaml = new ConfigWriter(context); + } + + @Test + public void shouldWriteNamespace() + { + NamespaceConfig config = new NamespaceConfig("test", emptyList(), null, emptyList(), emptyList(), emptyList()); + + String text = yaml.write(config); + + assertThat(text, not(nullValue())); + assertThat(text, equalTo("---\nname: \"test\"\n")); + } +} diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeyConfig.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfig.java similarity index 95% rename from runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeyConfig.java rename to runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfig.java index 764b9fe8cd..4b71b9a2a9 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeyConfig.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.guard.jwt.internal.config; +package io.aklivity.zilla.runtime.guard.jwt.config; public class JwtKeyConfig { diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeySetConfig.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeySetConfig.java similarity index 92% rename from runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeySetConfig.java rename to runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeySetConfig.java index 0e8ff0ca36..269fc1356c 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeySetConfig.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeySetConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.guard.jwt.internal.config; +package io.aklivity.zilla.runtime.guard.jwt.config; import java.util.List; diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfig.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfig.java similarity index 96% rename from runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfig.java rename to runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfig.java index 302cd94c7c..5e1481707c 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfig.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfig.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. 
See the License for the * specific language governing permissions and limitations under the License. */ -package io.aklivity.zilla.runtime.guard.jwt.internal.config; +package io.aklivity.zilla.runtime.guard.jwt.config; import static java.util.Optional.ofNullable; diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardContext.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardContext.java index 0d6cf7f4f0..1412213c25 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardContext.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardContext.java @@ -22,7 +22,7 @@ import io.aklivity.zilla.runtime.engine.EngineContext; import io.aklivity.zilla.runtime.engine.config.GuardConfig; import io.aklivity.zilla.runtime.engine.guard.GuardContext; -import io.aklivity.zilla.runtime.guard.jwt.internal.config.JwtOptionsConfig; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; final class JwtGuardContext implements GuardContext { diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandler.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandler.java index db5619973a..54f2358905 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandler.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandler.java @@ -43,10 +43,10 @@ import org.jose4j.lang.JoseException; import io.aklivity.zilla.runtime.engine.guard.GuardHandler; -import io.aklivity.zilla.runtime.guard.jwt.internal.config.JwtKeyConfig; -import io.aklivity.zilla.runtime.guard.jwt.internal.config.JwtKeySetConfig; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeyConfig; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeySetConfig; +import 
io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; import io.aklivity.zilla.runtime.guard.jwt.internal.config.JwtKeySetConfigAdapter; -import io.aklivity.zilla.runtime.guard.jwt.internal.config.JwtOptionsConfig; public class JwtGuardHandler implements GuardHandler { diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeyConfigAdapter.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeyConfigAdapter.java index 51a7e7546a..2a61619941 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeyConfigAdapter.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeyConfigAdapter.java @@ -19,6 +19,8 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeyConfig; + public final class JwtKeyConfigAdapter implements JsonbAdapter { private static final String ALG_NAME = "alg"; diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeySetConfigAdapter.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeySetConfigAdapter.java index d8ef3f5dc6..d554efe2b4 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeySetConfigAdapter.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeySetConfigAdapter.java @@ -25,6 +25,9 @@ import jakarta.json.JsonValue; import jakarta.json.bind.adapter.JsonbAdapter; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeyConfig; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeySetConfig; + public final class JwtKeySetConfigAdapter implements JsonbAdapter { private static final String KEYS_NAME = "keys"; diff --git 
a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapter.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapter.java index d9d1e47844..4c0077ccb1 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapter.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapter.java @@ -31,6 +31,8 @@ import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeyConfig; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; import io.aklivity.zilla.runtime.guard.jwt.internal.JwtGuard; public final class JwtOptionsConfigAdapter implements OptionsConfigAdapterSpi, JsonbAdapter diff --git a/runtime/guard-jwt/src/main/moditect/module-info.java b/runtime/guard-jwt/src/main/moditect/module-info.java index 200457b394..0d9410c4d4 100644 --- a/runtime/guard-jwt/src/main/moditect/module-info.java +++ b/runtime/guard-jwt/src/main/moditect/module-info.java @@ -17,6 +17,8 @@ requires io.aklivity.zilla.runtime.engine; requires org.jose4j; + exports io.aklivity.zilla.runtime.guard.jwt.config; + provides io.aklivity.zilla.runtime.engine.guard.GuardFactorySpi with io.aklivity.zilla.runtime.guard.jwt.internal.JwtGuardFactorySpi; diff --git a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandlerTest.java b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandlerTest.java index 5fe73952bf..a859687c1a 100644 --- a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandlerTest.java +++ b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandlerTest.java @@ -36,7 +36,7 @@ import 
org.jose4j.lang.JoseException; import org.junit.Test; -import io.aklivity.zilla.runtime.guard.jwt.internal.config.JwtOptionsConfig; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; public class JwtGuardHandlerTest { diff --git a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardTest.java b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardTest.java index 54562a4dbe..ccf797c2a0 100644 --- a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardTest.java +++ b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardTest.java @@ -42,7 +42,7 @@ import io.aklivity.zilla.runtime.engine.guard.GuardContext; import io.aklivity.zilla.runtime.engine.guard.GuardFactory; import io.aklivity.zilla.runtime.engine.guard.GuardHandler; -import io.aklivity.zilla.runtime.guard.jwt.internal.config.JwtOptionsConfig; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; public class JwtGuardTest { diff --git a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeySetConfigAdapterTest.java b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeySetConfigAdapterTest.java index d5a2a3d968..225813bc1e 100644 --- a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeySetConfigAdapterTest.java +++ b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeySetConfigAdapterTest.java @@ -31,6 +31,8 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeySetConfig; + public class JwtKeySetConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapterTest.java 
b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapterTest.java index 91325631f7..cd238e5577 100644 --- a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapterTest.java +++ b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapterTest.java @@ -31,6 +31,9 @@ import org.junit.Before; import org.junit.Test; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeyConfig; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; + public class JwtOptionsConfigAdapterTest { private Jsonb jsonb; diff --git a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/keys/JwtKeyConfigs.java b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/keys/JwtKeyConfigs.java index 53b45b3dff..61ab7ccad1 100644 --- a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/keys/JwtKeyConfigs.java +++ b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/keys/JwtKeyConfigs.java @@ -14,7 +14,7 @@ */ package io.aklivity.zilla.runtime.guard.jwt.internal.keys; -import io.aklivity.zilla.runtime.guard.jwt.internal.config.JwtKeyConfig; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeyConfig; public final class JwtKeyConfigs { diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfig.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfig.java similarity index 74% rename from runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfig.java rename to runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfig.java index 23a75d231f..1d5fb70ca9 100644 --- 
a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfig.java +++ b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfig.java @@ -13,20 +13,20 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.vault.filesystem.internal.config; +package io.aklivity.zilla.runtime.vault.filesystem.config; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; public class FileSystemOptionsConfig extends OptionsConfig { - public final FileSystemStore keys; - public final FileSystemStore trust; - public final FileSystemStore signers; + public final FileSystemStoreConfig keys; + public final FileSystemStoreConfig trust; + public final FileSystemStoreConfig signers; public FileSystemOptionsConfig( - FileSystemStore keys, - FileSystemStore trust, - FileSystemStore signers) + FileSystemStoreConfig keys, + FileSystemStoreConfig trust, + FileSystemStoreConfig signers) { this.keys = keys; this.trust = trust; diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStore.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfig.java similarity index 87% rename from runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStore.java rename to runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfig.java index 3e9e61031c..e149e22f64 100644 --- a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStore.java +++ b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfig.java @@ -13,15 +13,15 @@ * License for the specific language governing 
permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.vault.filesystem.internal.config; +package io.aklivity.zilla.runtime.vault.filesystem.config; -public class FileSystemStore +public class FileSystemStoreConfig { public final String store; public final String type; public final String password; - public FileSystemStore( + public FileSystemStoreConfig( String store, String type, String password) diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemContext.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemContext.java index d98e32d6e3..0b303d4bc6 100644 --- a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemContext.java +++ b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemContext.java @@ -22,7 +22,7 @@ import io.aklivity.zilla.runtime.engine.EngineContext; import io.aklivity.zilla.runtime.engine.config.VaultConfig; import io.aklivity.zilla.runtime.engine.vault.VaultContext; -import io.aklivity.zilla.runtime.vault.filesystem.internal.config.FileSystemOptionsConfig; +import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfig; final class FileSystemContext implements VaultContext { diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemVaultHandler.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemVaultHandler.java index dc3ee231c0..c465063d99 100644 --- a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemVaultHandler.java +++ b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemVaultHandler.java @@ -36,8 +36,8 @@ import org.agrona.LangUtil; import 
io.aklivity.zilla.runtime.engine.vault.VaultHandler; -import io.aklivity.zilla.runtime.vault.filesystem.internal.config.FileSystemOptionsConfig; -import io.aklivity.zilla.runtime.vault.filesystem.internal.config.FileSystemStore; +import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfig; +import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemStoreConfig; public class FileSystemVaultHandler implements VaultHandler { @@ -95,21 +95,21 @@ public PrivateKeyEntry[] keys( private static Function supplyLookupPrivateKeyEntry( Function resolvePath, - FileSystemStore aliases) + FileSystemStoreConfig aliases) { return supplyLookupAlias(resolvePath, aliases, FileSystemVaultHandler::lookupPrivateKeyEntry); } private static Function supplyLookupTrustedCertificateEntry( Function resolvePath, - FileSystemStore aliases) + FileSystemStoreConfig aliases) { return supplyLookupAlias(resolvePath, aliases, FileSystemVaultHandler::lookupTrustedCertificateEntry); } private Function, KeyStore.PrivateKeyEntry[]> supplyLookupPrivateKeyEntries( Function resolvePath, - FileSystemStore entries) + FileSystemStoreConfig entries) { Function, KeyStore.PrivateKeyEntry[]> lookupKeys = p -> null; @@ -166,7 +166,7 @@ private Function, KeyStore.PrivateKeyEntry[]> supplyLoo private static Function supplyLookupAlias( Function resolvePath, - FileSystemStore aliases, + FileSystemStoreConfig aliases, Lookup lookup) { Function lookupAlias = a -> null; diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapter.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapter.java index 6e7f1dd889..89aaf54c9b 100644 --- a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapter.java +++ 
b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapter.java @@ -22,6 +22,8 @@ import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; +import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfig; +import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemStoreConfig; import io.aklivity.zilla.runtime.vault.filesystem.internal.FileSystemVault; public final class FileSystemOptionsConfigAdapter implements OptionsConfigAdapterSpi, JsonbAdapter @@ -30,7 +32,7 @@ public final class FileSystemOptionsConfigAdapter implements OptionsConfigAdapte private static final String TRUST_NAME = "trust"; private static final String SIGNERS_NAME = "signers"; - private final FileSystemStoreAdapter store = new FileSystemStoreAdapter(); + private final FileSystemStoreConfigAdapter store = new FileSystemStoreConfigAdapter(); @Override public String type() @@ -74,13 +76,13 @@ public JsonObject adaptToJson( public OptionsConfig adaptFromJson( JsonObject object) { - FileSystemStore keys = object.containsKey(KEYS_NAME) + FileSystemStoreConfig keys = object.containsKey(KEYS_NAME) ? store.adaptFromJson(object.getJsonObject(KEYS_NAME)) : null; - FileSystemStore trust = object.containsKey(TRUST_NAME) + FileSystemStoreConfig trust = object.containsKey(TRUST_NAME) ? store.adaptFromJson(object.getJsonObject(TRUST_NAME)) : null; - FileSystemStore signers = object.containsKey(SIGNERS_NAME) + FileSystemStoreConfig signers = object.containsKey(SIGNERS_NAME) ? 
store.adaptFromJson(object.getJsonObject(SIGNERS_NAME)) : null; diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStoreAdapter.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStoreConfigAdapter.java similarity index 83% rename from runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStoreAdapter.java rename to runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStoreConfigAdapter.java index e3b128a2ad..f69447acf3 100644 --- a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStoreAdapter.java +++ b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStoreConfigAdapter.java @@ -20,7 +20,9 @@ import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; -public final class FileSystemStoreAdapter implements JsonbAdapter +import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemStoreConfig; + +public final class FileSystemStoreConfigAdapter implements JsonbAdapter { private static final String STORE_NAME = "store"; private static final String TYPE_NAME = "type"; @@ -28,7 +30,7 @@ public final class FileSystemStoreAdapter implements JsonbAdapter Date: Mon, 7 Aug 2023 13:43:24 -0700 Subject: [PATCH 012/115] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2e5a62fdfe..a641c56cbf 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ Zilla abstracts Apache Kafka® for web applications, IoT clients and microservic Zilla has no external dependencies and does not rely on the Kafka Consumer/Producer API or Kafka Connect. 
Instead, it natively supports the Kafka wire protocol and uses advanced protocol mediation to establish stateless API entry points into Kafka. Zilla also addresses security enforcement, observability and connection offloading on the data path. -When Zilla is deployed alongside Apache Kafka®, achieving an extensible yet streamlined event-driven architecture becomes much easier. +When Zilla is deployed alongside Apache Kafka®, achieving an extensible yet streamlined event-driven architecture becomes possible. ## Contents From 5c8a84633261bb75605ecf4a549e1b54d9a978ad Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Tue, 8 Aug 2023 11:06:41 -0700 Subject: [PATCH 013/115] Include member count as part of group data ex (#327) --- .../stream/KafkaClientGroupFactory.java | 4 +++- .../kafka/internal/KafkaFunctions.java | 24 ++++++++++++++++++- .../main/resources/META-INF/zilla/kafka.idl | 1 + .../client.rpt | 3 ++- .../server.rpt | 1 + .../application/group/leader/client.rpt | 3 ++- .../application/group/leader/server.rpt | 1 + .../client.rpt | 4 +++- .../server.rpt | 2 ++ .../rebalance.protocol.highlander/client.rpt | 6 +++-- .../rebalance.protocol.highlander/server.rpt | 2 ++ .../rebalance.protocol.unknown/client.rpt | 3 ++- .../rebalance.protocol.unknown/server.rpt | 1 + .../kafka/internal/KafkaFunctionsTest.java | 6 ++++- 14 files changed, 52 insertions(+), 9 deletions(-) diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index d7d5265332..2918246b4a 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -2906,7 +2906,7 @@ private void onSyncGroupResponse( 
delegate.doApplicationData(traceId, authorization, assignment, ex -> ex.set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) .typeId(kafkaTypeId) - .group(g -> g.leaderId(leader).memberId(memberId)) + .group(g -> g.leaderId(leader).memberId(memberId).members(members.size())) .build() .sizeof())); @@ -2941,6 +2941,8 @@ private void onLeaveGroupResponse( long traceId, long authorization) { + delegate.groupMembership.memberIds.remove(delegate.groupId); + doNetworkEnd(traceId, authorization); doNetworkReset(traceId); diff --git a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java index 851c7e7f3b..5d5516f7b5 100644 --- a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java +++ b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java @@ -1597,6 +1597,13 @@ public KafkaGroupDataExBuilder memberId( return this; } + public KafkaGroupDataExBuilder members( + int members) + { + groupDataExRW.members(members); + return this; + } + public KafkaDataExBuilder build() { final KafkaGroupDataExFW groupDataEx = groupDataExRW.build(); @@ -2722,6 +2729,7 @@ public final class KafkaGroupDataExMatchBuilder { private String16FW leaderId; private String16FW memberId; + private Integer members; private KafkaGroupDataExMatchBuilder() { @@ -2741,6 +2749,13 @@ public KafkaGroupDataExMatchBuilder memberId( return this; } + public KafkaGroupDataExMatchBuilder members( + int members) + { + this.members = Integer.valueOf(members); + return this; + } + public KafkaDataExMatcherBuilder build() { return KafkaDataExMatcherBuilder.this; @@ -2751,7 +2766,8 @@ private boolean match( { final KafkaGroupDataExFW groupDataEx = dataEx.group(); return matchLeaderId(groupDataEx) && - matchMemberId(groupDataEx); + matchMemberId(groupDataEx) && + 
matchmembers(groupDataEx); } private boolean matchLeaderId( @@ -2765,6 +2781,12 @@ private boolean matchMemberId( { return memberId == null || memberId.equals(groupDataEx.memberId()); } + + private boolean matchmembers( + final KafkaGroupDataExFW groupDataEx) + { + return members != null && members == groupDataEx.members(); + } } } diff --git a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl index ea3adcc86c..9f4a432235 100644 --- a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl +++ b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl @@ -335,6 +335,7 @@ scope kafka { string16 leaderId; string16 memberId; + int32 members; } } } diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt index 34d5be7b16..f89aa1e6a2 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt @@ -31,11 +31,12 @@ connected write advise zilla:flush -read zilla:data.ext ${kafka:dataEx() +read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(1) .build() .build()} read zilla:data.null diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt index e571392836..84a8447aee 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt @@ -40,6 +40,7 @@ write zilla:data.ext ${kafka:dataEx() .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(1) .build() .build()} write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt index ea472ace1c..866322fc50 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt @@ -30,11 +30,12 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:dataEx() +read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(1) .build() .build()} read zilla:data.null diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt index 178b40bc4d..553ae19f43 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt @@ -38,6 +38,7 @@ write zilla:data.ext ${kafka:dataEx() .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(1) .build() .build()} write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt index 567e4f7725..be5f60d06a 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -35,6 +35,7 @@ read zilla:data.ext ${kafka:dataEx() .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(1) .build() .build()} read zilla:data.null @@ -59,11 +60,12 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:dataEx() +read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(1) .build() .build()} read zilla:data.null diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt index ff4baeea4a..81ef4af79e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -38,6 +38,7 @@ write zilla:data.ext ${kafka:dataEx() .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(1) .build() .build()} write flush @@ -62,6 +63,7 @@ write zilla:data.ext ${kafka:dataEx() .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(1) .build() .build()} write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt index b7765fa2a4..f090d653d8 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt @@ -30,22 +30,24 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:dataEx() +read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(1) .build() .build()} read zilla:data.null write advise zilla:flush -read zilla:data.ext ${kafka:dataEx() +read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(2) .build() .build()} read zilla:data.null diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt index 
4f0f18bb9c..9a205e75a5 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt @@ -38,6 +38,7 @@ write zilla:data.ext ${kafka:dataEx() .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(1) .build() .build()} write flush @@ -49,6 +50,7 @@ write zilla:data.ext ${kafka:dataEx() .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(2) .build() .build()} write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt index b8293ceeca..4fd5d1aa57 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt @@ -30,11 +30,12 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:dataEx() +read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(1) .build() .build()} read zilla:data.null diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt index a72a8ba9f8..21cb33f7aa 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt @@ -38,6 +38,7 @@ write zilla:data.ext ${kafka:dataEx() .group() .leaderId("memberId-1") .memberId("memberId-1") + .members(1) .build() .build()} write flush diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java index 1d74e8eecb..ce08684543 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java @@ -3832,6 +3832,7 @@ public void shouldGenerateGroupDataExtension() .group() .leaderId("test1") .memberId("test2") + .members(2) .build() .build(); @@ -3843,6 +3844,7 @@ public void shouldGenerateGroupDataExtension() final KafkaGroupDataExFW groupDataEx = dataEx.group(); assertEquals("test1", groupDataEx.leaderId().asString()); assertEquals("test2", groupDataEx.memberId().asString()); + assertTrue(groupDataEx.members() == 2); } @Test @@ -3853,6 +3855,7 @@ public void shouldMatchGroupDataExtension() throws Exception .group() .leaderId("test1") .memberId("test2") + .members(2) .build() .build(); @@ -3863,7 +3866,8 @@ public void shouldMatchGroupDataExtension() throws Exception .typeId(0x01) .group(f -> f .leaderId("test1") - .memberId("test2")) + .memberId("test2") + .members(2)) .build(); assertNotNull(matcher.match(byteBuf)); From 758dc3e9eafd68dddbe3212ad04330ebcf50d196 Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Wed, 9 Aug 2023 08:40:12 -0700 Subject: [PATCH 014/115] Default group session timeout (#328) * Default 
group session timeout to broker value if it is greater then default value --- .../stream/KafkaCacheGroupFactory.java | 36 +++++++++++++++-- .../stream/KafkaClientGroupFactory.java | 39 ++++++++++++++++++- .../client.rpt | 9 +++++ .../server.rpt | 10 +++++ .../application/group/leader/client.rpt | 8 ++++ .../application/group/leader/server.rpt | 10 +++++ .../client.rpt | 16 ++++++++ .../server.rpt | 20 ++++++++++ .../rebalance.protocol.highlander/client.rpt | 8 ++++ .../rebalance.protocol.highlander/server.rpt | 10 +++++ .../rebalance.protocol.unknown/client.rpt | 8 ++++ .../rebalance.protocol.unknown/server.rpt | 10 +++++ .../coordinator.not.available/client.rpt | 4 +- .../coordinator.not.available/server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../rebalance.protocol.highlander/client.rpt | 6 +-- .../rebalance.protocol.highlander/server.rpt | 6 +-- .../rebalance.protocol.unknown/client.rpt | 2 +- .../rebalance.protocol.unknown/server.rpt | 2 +- .../rebalance.sync.group/client.rpt | 4 +- .../rebalance.sync.group/server.rpt | 4 +- .../leader/client.rpt | 2 +- .../leader/server.rpt | 2 +- 30 files changed, 213 insertions(+), 39 deletions(-) diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java index c27fe29960..e4e4ceb6a9 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java @@ -205,6 +205,35 @@ private void doBegin( receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); } + private void doBegin( + 
MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Flyweight extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + private void doData( MessageConsumer receiver, long originId, @@ -619,7 +648,7 @@ private void onGroupReplyBegin( state = KafkaState.openingReply(state); - delegate.doGroupReplyBegin(traceId); + delegate.doGroupReplyBegin(traceId, begin.extension()); } private void onGroupReplyData( @@ -928,12 +957,13 @@ private void doGroupInitialWindow( } private void doGroupReplyBegin( - long traceId) + long traceId, + OctetsFW extension) { state = KafkaState.openingReply(state); doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, affinity, EMPTY_EXTENSION); + traceId, authorization, affinity, extension); } private void doGroupReplyData( diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index 2918246b4a..4660710a84 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -251,7 +251,7 @@ public MessageConsumer newStream( final KafkaBindingConfig binding = 
supplyBinding.apply(routedId); final KafkaRouteConfig resolved; - final int timeout = kafkaGroupBeginEx.timeout(); + final int timeout = Math.min(kafkaGroupBeginEx.timeout(), 30_000); final String groupId = kafkaGroupBeginEx.groupId().asString(); final String protocol = kafkaGroupBeginEx.protocol().asString(); @@ -372,6 +372,35 @@ private void doBegin( receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); } + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Flyweight extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + private void doData( MessageConsumer receiver, long originId, @@ -1189,8 +1218,14 @@ private void doApplicationBegin( { state = KafkaState.openingReply(state); + final KafkaBeginExFW kafkaBeginEx = + kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .group(g -> g.groupId(groupId).protocol(protocol).timeout(timeout)) + .build(); + doBegin(application, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, affinity, EMPTY_EXTENSION); + traceId, authorization, affinity, kafkaBeginEx); } private void doApplicationData( diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt index f89aa1e6a2..cb2deade38 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt @@ -29,6 +29,15 @@ write zilla:begin.ext ${kafka:beginEx() connected +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} + write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt index 84a8447aee..52b3df2fa1 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt @@ -33,6 +33,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt index 866322fc50..e1d426f7bc 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt @@ -29,6 +29,14 @@ write zilla:begin.ext ${kafka:beginEx() connected +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt index 553ae19f43..e294b4e29e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt @@ -33,6 +33,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .group() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt index be5f60d06a..0ad5b88eb7 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -29,6 +29,14 @@ write zilla:begin.ext ${kafka:beginEx() connected +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} read zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) @@ -59,6 +67,14 @@ write zilla:begin.ext ${kafka:beginEx() connected +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt index 81ef4af79e..612219b422 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -33,6 +33,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .group() @@ -58,6 
+68,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .group() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt index f090d653d8..97fe944ccb 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt @@ -29,6 +29,14 @@ write zilla:begin.ext ${kafka:beginEx() connected +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt index 9a205e75a5..3cad0f6c66 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt @@ -33,6 +33,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() 
connected +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .group() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt index 4fd5d1aa57..d148031a57 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt @@ -29,6 +29,14 @@ write zilla:begin.ext ${kafka:beginEx() connected +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("unknown") + .timeout(30000) + .build() + .build()} read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt index 21cb33f7aa..b2ffbdad10 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt @@ -33,6 +33,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() 
+ .groupId("test") + .protocol("unknown") + .timeout(30000) + .build() + .build()} +write flush + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .group() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt index 34b66f8209..b31879fae3 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt @@ -82,7 +82,7 @@ write 105 # size ${newRequestId} 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s ${instanceId} # group instance id @@ -107,7 +107,7 @@ write 115 # size ${newRequestId} 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 10s "memberId-1" # consumer group member 42s ${instanceId} # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt index 960d26d2d4..c8ee99cba2 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt @@ -72,7 +72,7 @@ read 105 # size (int:newRequestId) 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s [0..42] # group instance id @@ -98,7 +98,7 @@ read 115 # size (int:newRequestId) 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 10s "memberId-1" # consumer group member 42s [0..42] # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt index bb8ccf51e6..661394f51f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt @@ -65,7 +65,7 @@ write 105 # size ${newRequestId} 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s ${instanceId} # group instance id @@ -133,7 +133,7 @@ write 105 # size ${newRequestId} 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s ${instanceId} # group instance id diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt index 6f7f230d1a..0501e11c66 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt @@ -55,7 +55,7 @@ read 105 # size (int:newRequestId) 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s [0..42] # group instance id @@ -111,7 +111,7 @@ read 105 # size (int:newRequestId) 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s [0..42] # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt index 8697a0555f..3ebf38ef03 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt @@ -65,7 +65,7 @@ write 105 # size ${newRequestId} 5s "zilla" # client id 4s "test" # consumer 
group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s ${instanceId} # group instance id @@ -90,7 +90,7 @@ write 115 # size ${newRequestId} 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 10s "memberId-1" # consumer group member 42s ${instanceId} # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt index f71705ab1e..dd66906ce9 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt @@ -55,7 +55,7 @@ read 105 # size (int:newRequestId) 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s [0..42] # group instance id @@ -81,7 +81,7 @@ read 115 # size (int:newRequestId) 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 10s "memberId-1" # consumer group member 42s [0..42] # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt index 
71cb8605e4..4fece6bbfb 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -65,7 +65,7 @@ write 105 # size ${newRequestId} 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s ${instanceId} # group instance id @@ -153,7 +153,7 @@ write 115 # size ${newRequestId} 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 10s "memberId-1" # consumer group member 42s ${instanceId} # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt index efaee4ce49..62dd9d1a4f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -55,7 +55,7 @@ read 105 # size (int:newRequestId) 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s [0..42] # group instance id @@ -133,7 +133,7 @@ read 115 # size (int:newRequestId) 5s "zilla" # no client id 4s "test" # consumer 
group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 10s "memberId-1" # consumer group member 42s [0..42] # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt index b956d5e1f1..d395166ffd 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt @@ -65,7 +65,7 @@ write 105 # size ${newRequestId} 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s ${instanceId} # group instance id @@ -90,7 +90,7 @@ write 105 # size ${newRequestId} 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s ${instanceId} # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt index 06ea422a7a..cf3f407a26 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt 
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt @@ -55,7 +55,7 @@ read 105 # size (int:newRequestId) 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s [0..42] # group instance id @@ -81,7 +81,7 @@ read 105 # size (int:newRequestId) 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s [0..42] # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt index 5165d45fd2..2bfabe1123 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt @@ -65,7 +65,7 @@ write 105 # size ${newRequestId} 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s ${instanceId} # group instance id @@ -90,7 +90,7 @@ write 115 # size ${newRequestId} 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 10s "memberId-1" # consumer group member 42s ${instanceId} # group instance id @@ -152,7 +152,7 @@ write 115 # size ${newRequestId} 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # 
session timeout 4000 # rebalance timeout 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt index 077b3cffc5..099f0a0a68 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt @@ -55,7 +55,7 @@ read 105 # size (int:newRequestId) 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s [0..42] # group instance id @@ -81,7 +81,7 @@ read 115 # size (int:newRequestId) 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 10s "memberId-1" # consumer group member 42s [0..42] # group instance id @@ -143,7 +143,7 @@ read 115 # size (int:newRequestId) 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt index 03968c7817..c73a41889a 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt @@ -65,7 +65,7 @@ write 102 # size ${newRequestId} 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s ${instanceId} # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt index aecd6f1d65..75e53e1bbd 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt @@ -55,7 +55,7 @@ read 102 # size (int:newRequestId) 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s [0..42] # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt index 8d198b5a1f..6dea7afae8 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt @@ -65,7 +65,7 @@ write 105 # size ${newRequestId} 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s ${instanceId} # group instance id @@ -112,7 +112,7 @@ write 115 # size ${newRequestId} 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 10s "memberId-1" # consumer group member 42s ${instanceId} # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt index 7543f130d7..e3a812d391 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt @@ -55,7 +55,7 @@ read 105 # size (int:newRequestId) 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s [0..42] # group instance id @@ -102,7 +102,7 @@ read 115 # size (int:newRequestId) 5s "zilla" # no client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 10s "memberId-1" # consumer group member 42s [0..42] # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt index a12b220d76..6f082da9a8 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt @@ -123,7 +123,7 @@ write 105 # size ${newRequestId} 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s ${instanceId} # group instance id diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt index 7d5eea0df9..44d6e465b8 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt @@ -113,7 +113,7 @@ read 105 # size (int:newRequestId) 5s "zilla" # client id 4s "test" # consumer group - 45000 # session timeout + 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member 42s [0..42] # group instance id From 2448142619aed706cb707cdf329e823b34407397 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Thu, 10 Aug 2023 16:19:46 +0200 Subject: [PATCH 015/115] Mqtt kafka sessions (#318) --- .../client.rpt | 298 +++ .../server.rpt | 274 +++ .../session.client.sent.reset/client.rpt | 104 ++ .../session.client.sent.reset/server.rpt | 97 + 
.../kafka/session.client.takeover/client.rpt | 361 ++++ .../kafka/session.client.takeover/server.rpt | 351 ++++ .../session.exists.clean.start/client.rpt | 326 ++++ .../session.exists.clean.start/server.rpt | 324 ++++ .../client.rpt | 104 ++ .../server.rpt | 100 + .../session.server.sent.reset/client.rpt | 102 + .../session.server.sent.reset/server.rpt | 96 + .../client.rpt | 152 ++ .../server.rpt | 144 ++ .../kafka/session.subscribe/client.rpt | 168 ++ .../kafka/session.subscribe/server.rpt | 162 ++ .../client.rpt | 192 ++ .../server.rpt | 183 ++ .../client.rpt | 163 ++ .../server.rpt | 152 ++ .../client.rpt | 109 ++ .../server.rpt | 102 + .../mqtt/session.client.sent.reset/client.rpt | 32 + .../mqtt/session.client.sent.reset/server.rpt | 35 + .../mqtt/session.client.takeover/client.rpt | 107 ++ .../mqtt/session.client.takeover/server.rpt | 104 ++ .../session.exists.clean.start/client.rpt | 87 + .../session.exists.clean.start/server.rpt | 77 + .../mqtt/session.server.sent.reset/client.rpt | 32 + .../mqtt/session.server.sent.reset/server.rpt | 35 + .../client.rpt | 59 + .../server.rpt | 61 + .../streams/mqtt/session.subscribe/client.rpt | 68 + .../streams/mqtt/session.subscribe/server.rpt | 64 + .../client.rpt | 76 + .../server.rpt | 75 + .../client.rpt | 63 + .../server.rpt | 63 + .../mqtt/subscribe.one.message/client.rpt | 2 +- .../binding/mqtt/kafka/streams/KafkaIT.java | 90 + .../binding/mqtt/kafka/streams/MqttIT.java | 81 + .../internal/MqttKafkaConfiguration.java | 73 +- .../stream/MqttKafkaProxyFactory.java | 16 +- .../stream/MqttKafkaPublishFactory.java | 19 +- .../stream/MqttKafkaSessionFactory.java | 1646 +++++++++++++++++ .../stream/MqttKafkaStreamFactory.java | 22 +- .../stream/MqttKafkaSubscribeFactory.java | 2 +- .../internal/MqttKafkaConfigurationTest.java | 12 +- .../stream/MqttKafkaSessionProxyIT.java | 154 ++ .../binding/mqtt/internal/MqttFunctions.java | 7 - .../main/resources/META-INF/zilla/mqtt.idl | 2 +- .../client.rpt | 3 - .../server.rpt | 3 
- .../session.client.takeover/client.rpt | 3 - .../session.client.takeover/server.rpt | 3 - .../session.exists.clean.start/client.rpt | 3 - .../session.exists.clean.start/server.rpt | 3 - .../session.server.sent.abort/client.rpt | 53 + .../session.server.sent.abort/server.rpt | 32 + .../client.rpt | 72 + .../server.rpt | 75 + .../client.rpt | 1 - .../server.rpt | 1 - .../application/session.subscribe/client.rpt | 2 - .../application/session.subscribe/server.rpt | 2 - .../client.rpt | 4 - .../server.rpt | 4 - .../client.rpt | 4 - .../server.rpt | 4 - .../client.rpt | 3 - .../server.rpt | 3 - .../client.rpt | 59 + .../server.rpt | 60 + .../mqtt/internal/MqttFunctionsTest.java | 2 - .../mqtt/streams/application/SessionIT.java | 9 + .../mqtt/streams/network/SessionIT.java | 9 + .../internal/stream/MqttServerFactory.java | 149 +- .../mqtt/internal/stream/SessionIT.java | 14 + 78 files changed, 7605 insertions(+), 203 deletions(-) create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt create mode 100644 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt create mode 
100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt 
create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt create mode 100644 incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java create mode 100644 incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt create mode 100644 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt new file mode 100644 index 0000000000..27d934b82a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt @@ -0,0 +1,298 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write close +read closed + +write notify INIT_MIGRATE_SENT + +connect await INIT_MIGRATE_SENT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write abort + +write notify GROUP1_ABORTED + +connect await GROUP1_ABORTED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + 
.merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write abort +read aborted + +write notify SESSION1_ABORTED + +connect await SESSION1_ABORTED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write abort +read aborted + +write notify SUBSCRIBE1_ABORTED + +connect await SUBSCRIBE1_ABORTED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write notify INIT2_MIGRATE_SENT + +connect await INIT2_MIGRATE_SENT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write notify GROUP2_FINISHED + +connect await GROUP2_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option 
zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +read advised zilla:flush + +write notify SESSION_STATE2_FINISHED + +connect await SESSION_STATE2_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt new file mode 100644 index 0000000000..b99284804c --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt @@ 
-0,0 +1,274 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write flush + +read aborted +write abort + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write 
advise zilla:flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read aborted +write abort + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read aborted +write abort + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write flush + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + 
.key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +write advise zilla:flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt new file mode 100644 index 0000000000..b17bc2c18a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt @@ -0,0 +1,104 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write close +read closed + +write notify INIT_MIGRATE_FINISHED + +connect await INIT_MIGRATE_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +read abort + +write notify GROUP_FINISHED + +connect await GROUP_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", 
"sender-1") + .build() + .build() + .build()} + +connected + +read advised zilla:flush + +read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt new file mode 100644 index 0000000000..9761a45495 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt @@ -0,0 +1,97 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} +write flush + +write aborted + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write advise zilla:flush + +write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt new file mode 100644 index 0000000000..ed7bcbf4e0 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt @@ -0,0 +1,361 @@ +# +# Copyright 2021-2023 Aklivity Inc +# 
+# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write close +read closed + +write notify INIT_MIGRATE_CLOSED + +connect await INIT_MIGRATE_CLOSED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write advise zilla:flush + +write notify HEARTBEAT1_SENT + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(2) + .build() + .build()} + +write close + +write notify GROUP1_CLOSED + +connect 
await GROUP1_CLOSED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write close +read closed + +write notify SESSION_STATE1_CLOSED + +connect await SESSION_STATE1_CLOSED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write close +read closed + +write notify SUBSCRIBE1_CLOSED + +connect await SUBSCRIBE1_CLOSED + 
"zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write notify FIRST_CLIENT_LEFT + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +write close +read closed + +write notify INIT_MIGRATE2_FINISHED + +connect await INIT_MIGRATE2_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-2") + .members(2) + .build() + .build()} + +write advise zilla:flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-2") + .memberId("consumer-2") + .members(1) + .build() + .build()} + +write notify GROUP2_FINISHED + +connect await GROUP2_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + 
.key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +read advised zilla:flush + +write notify SESSION_STATE2_FINISHED + +connect await SESSION_STATE2_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client-1") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt new file mode 100644 index 0000000000..0a31ece87b --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt @@ -0,0 +1,351 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} +write flush + +read advised zilla:flush + +# On the session stream the heartbeat arrives (on the mqtt_sessions merged stream) +read await HEARTBEAT1_SENT + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(2) + .build() + .build()} +write flush + +# We've realised that we're the leader, but there's another member -> leave + +read closed +write notify FIRST_CLIENT_LEFT +# On the session publish stream, send a heartbeat, that triggers the second connection to heartbeat on the 
group stream + +write close + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write advise zilla:flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +#We've left the group -> send migrate +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + 
.build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +write notify HEARTBEAT1_SENT + +read await FIRST_CLIENT_LEFT + +#Migrate arrives from the first connection +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty + +read closed +write close + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-2") + .members(2) + .build() + .build()} +write flush + +# On the session publish stream, send a heartbeat + +read advised zilla:flush + +# Wait until I receive a data frame, that confirms that I'm the leader +# Once it's confirmed, I can send the CONNACK + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-2") + .memberId("consumer-2") + .members(1) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +write advise zilla:flush + + 
+accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client-1") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt new file mode 100644 index 0000000000..b728334995 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt @@ -0,0 +1,326 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write close +read closed + +write notify INIT_MIGRATE_SENT1 + +connect await INIT_MIGRATE_SENT1 + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write advise zilla:flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(2) + .build() + .build()} + +write close + +write notify GROUP1_CLOSED + +connect await GROUP1_CLOSED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + 
.key("client-1") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write notify SESSION2_NOT_LEADER + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +write notify FIRST_CLIENT_LEFT + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty + +write close +read closed + +write notify SESSION1_CLOSED + +connect await SESSION1_CLOSED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write close +read closed + +write notify SUBSCRIBE1_CLOSED + +connect await SUBSCRIBE1_CLOSED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", 
"sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +write notify INIT_MIGRATE_SENT2 + +connect await INIT_MIGRATE_SENT2 + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-2") + .members(2) + .build() + .build()} + +write advise zilla:flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-2") + .memberId("consumer-2") + .members(1) + .build() + .build()} + +write notify GROUP2_FINISHED + +connect await GROUP2_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +write zilla:data.empty +write flush diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt new file mode 100644 index 0000000000..0e677249de --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt @@ -0,0 +1,324 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} +write flush + +# On the session stream the heartbeat arrives (on the mqtt_sessions merged stream) +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(2) + .build() + .build()} +write flush + +# We've realised that we're not the only member -> leave the group + +read closed +write notify FIRST_CLIENT_LEFT +# On the session publish stream, send a heartbeat, that triggers the second connection to heartbeat on the group stream + +write close + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write advise zilla:flush + +read zilla:data.ext ${kafka:matchDataEx() + 
 .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read await SESSION2_NOT_LEADER +#Heartbeat arrives +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +read await FIRST_CLIENT_LEFT + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read await FIRST_CLIENT_LEFT + +#Migrate arrives from the first connection +write zilla:data.ext ${kafka:dataEx() + 
.typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-2") + .members(2) + .build() + .build()} +write flush +write notify SESSION2_NOT_LEADER + +# On the session publish stream, send a heartbeat + +read advised zilla:flush + + +# Wait until I receive a data frame, that confirms that I'm the leader +# Once it's confirmed, I can send the CONNACK + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-2") + .memberId("consumer-2") + .members(1) + .build() + .build()} +write flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +write advise zilla:flush + +# clear session state, as it's a clean start +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +read zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt new file mode 100644 index 0000000000..74a4d2c97d --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt @@ -0,0 +1,104 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write close +read closed + +write notify INIT_MIGRATE_FINISHED + +connect await INIT_MIGRATE_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext 
${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write notify CONNACK_TRIGGERED + + +write notify GROUP_FINISHED + +connect await GROUP_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read advised zilla:flush + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt new file mode 100644 index 0000000000..1f89f2850f --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt @@ -0,0 +1,100 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} +write flush + +read await CONNACK_TRIGGERED + +read abort + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write advise zilla:flush + +write notify CONNACK_TRIGGERED diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt new file mode 100644 index 0000000000..582dbc7019 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt @@ -0,0 
+1,102 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write close +read closed + +write notify INIT_MIGRATE_FINISHED + +connect await INIT_MIGRATE_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write notify GROUP_FINISHED + +connect await GROUP_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + 
.merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read advised zilla:flush + +write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt new file mode 100644 index 0000000000..d21303343b --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt @@ -0,0 +1,96 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} +write flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write advise zilla:flush + +read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt new file mode 100644 index 0000000000..ca39c0c712 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt @@ -0,0 +1,152 @@ +# +# Copyright 2021-2023 
Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write notify INIT_MIGRATE_SENT + +connect await INIT_MIGRATE_SENT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + + +write notify GROUP1_FINISHED + +connect await GROUP1_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + 
.filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read advised zilla:flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write notify SESSION_FINISHED + +connect await SESSION_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt new file mode 100644 index 0000000000..9184a85857 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt @@ -0,0 +1,144 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write advise zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} 
+write flush + +write notify SESSION_STATE_SENT + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt new file mode 100644 index 0000000000..2f7bc1b5ce --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt @@ -0,0 +1,168 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write close +read closed + +write notify INIT_MIGRATE_FINISHED + +connect await INIT_MIGRATE_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write notify GROUP_FINISHED + +connect await GROUP_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + 
.deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write notify SESSION_STATE_FINISHED + +connect await SESSION_STATE_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt new file mode 100644 index 0000000000..905ba443e3 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt @@ -0,0 +1,162 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +# This is the second prerequisite +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} +write flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write advise zilla:flush + + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} 
+write flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt new file mode 100644 index 0000000000..4eb0d2067f --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt @@ -0,0 +1,192 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write notify SESSION1_FINISHED + +connect await SESSION1_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write notify GROUP1_FINISHED + +connect await GROUP1_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + 
.key("client-1") + .build() + .build()} +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +write ${mqtt:session() + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +read ${mqtt:session() + .build()} + +write notify SESSION_FINISHED + +connect await SESSION_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message" + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt new file mode 100644 index 0000000000..2e1992a80d --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt @@ -0,0 +1,183 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity 
Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write advise zilla:flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + 
.key("client-1") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +read ${mqtt:session() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +write ${mqtt:session() + .build()} +write flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(kafka:timestamp()) + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:topic", "sensor") + .header("zilla:topic", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message" +write flush + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt new file mode 100644 index 0000000000..9ebb076924 --- /dev/null +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt @@ -0,0 +1,163 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write notify INIT_MIGRATE_SENT + +connect await INIT_MIGRATE_SENT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write notify GROUP1_FINISHED + +connect await GROUP1_FINISHED + "zilla://streams/kafka0" + option 
zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +read ${mqtt:session() + .build()} + +write notify SESSION1_FINISHED + + +connect await SESSION1_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt new file mode 100644 index 0000000000..7c96e0c420 --- /dev/null +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt @@ -0,0 +1,152 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .filter() + .key("client-1") + .build() + .filter() + 
.key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write advise zilla:flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} + +write ${mqtt:session() + .build()} +write flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:topic") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt new file mode 100644 index 0000000000..8429c220cc --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt @@ -0,0 +1,109 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +read zilla:data.empty + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write notify SESSION1_READY + +write abort +read aborted +write notify SESSION1_ABORTED + + +connect await SESSION1_READY + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} +connected + +write abort +read aborted + +connect await SESSION1_ABORTED + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write notify SESSION_READY2 + +connect await SESSION_READY2 + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + 
.subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt new file mode 100644 index 0000000000..ad9ee2ce1f --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt @@ -0,0 +1,102 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + + +read aborted +write abort + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +read aborted +write abort + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .build() + .build()} + +connected + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message" +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt new file mode 100644 index 0000000000..a225126b73 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt @@ -0,0 +1,32 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may 
not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +read zilla:data.empty + +read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt new file mode 100644 index 0000000000..be2069e676 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt @@ -0,0 +1,35 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt new file mode 100644 index 0000000000..5a8557316e --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt @@ -0,0 +1,107 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write notify READ_SESSION_STATE + +read closed +write close + +connect await READ_SESSION_STATE + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} +connected +write notify CONNECTED + +write close +read closed + + +connect await CONNECTED + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write notify SESSION_READY2 + +connect await SESSION_READY2 + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +read "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt new file mode 100644 index 
0000000000..b99a7bfec5 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt @@ -0,0 +1,104 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read await CLIENT_TAKEOVER + +write close +read closed + + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +read closed +write close + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .build() + .build()} + +connected + +write notify CLIENT_TAKEOVER + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +write 
zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message" +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt new file mode 100644 index 0000000000..ac9a831686 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt @@ -0,0 +1,87 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +read zilla:data.empty + +write notify READ_SESSION_EMPTY + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write notify SESSION_READY + +read closed +write close + + +connect await READ_SESSION_EMPTY + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +write close +read closed + +connect await SESSION_READY + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt new file mode 100644 index 0000000000..496822dd17 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt @@ -0,0 +1,77 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +write close +read closed + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +read closed +write close + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .build() + .build()} + +connected + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt new file mode 100644 index 0000000000..c3f002620c --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt @@ -0,0 +1,32 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License 
(the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +read zilla:data.empty + +write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt new file mode 100644 index 0000000000..1802b2d422 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt @@ -0,0 +1,35 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt new file mode 100644 index 0000000000..640f139646 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt @@ -0,0 +1,59 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +read zilla:data.empty + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +connect await SESSION_STATE_SENT + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .format("TEXT") + .build() + .build()} + +read "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt new file mode 100644 index 0000000000..15212eda0a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt @@ -0,0 +1,61 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +write notify SESSION_STATE_SENT +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .format("TEXT") + .build() + .build()} + +write "message" +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt new file mode 100644 index 0000000000..a16768f84d --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt @@ -0,0 +1,68 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +read zilla:data.empty + +write notify READ_EMPTY_STATE + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + + +connect await READ_EMPTY_STATE + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} +read "message" + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt new file mode 100644 index 0000000000..a75a7dff44 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt @@ -0,0 +1,64 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} +write "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt new file mode 100644 index 0000000000..7195298048 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt @@ -0,0 +1,76 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +read zilla:data.empty + +write notify READ_EMPTY_STATE + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .build()} + +read ${mqtt:session() + .build()} + + + +connect await READ_EMPTY_STATE + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} +read "message" + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt new file mode 100644 index 0000000000..a15b6da914 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt @@ -0,0 +1,75 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .build()} + +write ${mqtt:session() + .build()} +write flush + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} +write "message" + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt new file mode 100644 index 0000000000..21507a821f --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt @@ -0,0 +1,63 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the 
Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + + +read zilla:data.empty + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write notify READ_SESSION_STATE + +read ${mqtt:session() + .build()} + + +connect await READ_SESSION_STATE + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt new file mode 100644 index 0000000000..b62e481f0d --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt @@ -0,0 +1,63 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community 
License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .expiry(1) + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read await READ_SESSION_STATE +write ${mqtt:session() + .build()} +write flush + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client-1") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt index 44d1399684..9fdc50a839 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt @@ -35,4 +35,4 @@ read zilla:data.ext ${mqtt:matchDataEx() .format("TEXT") .build() .build()} -read "message" 
\ No newline at end of file +read "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java index 8790938082..cf100bd1f3 100644 --- a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java +++ b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java @@ -475,4 +475,94 @@ public void shouldAcknowledgeSingleTopicFilter() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${kafka}/session.abort.reconnect.non.clean.start/client", + "${kafka}/session.abort.reconnect.non.clean.start/server"}) + public void shouldReconnectNonCleanStart() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.client.takeover/client", + "${kafka}/session.client.takeover/server"}) + public void shouldTakeOverSession() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.exists.clean.start/client", + "${kafka}/session.exists.clean.start/server"}) + public void shouldRemoveSessionAtCleanStart() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.subscribe/client", + "${kafka}/session.subscribe/server"}) + public void shouldSubscribeSaveSubscriptionsInSession() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.subscribe.via.session.state/client", + "${kafka}/session.subscribe.via.session.state/server"}) + public void shouldReceiveMessageSubscribedViaSessionState() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.unsubscribe.after.subscribe/client", + "${kafka}/session.unsubscribe.after.subscribe/server"}) + public void shouldUnsubscribeAndUpdateSessionState() throws Exception + { + 
k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.unsubscribe.via.session.state/client", + "${kafka}/session.unsubscribe.via.session.state/server"}) + public void shouldUnsubscribeViaSessionState() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.client.sent.reset/client", + "${kafka}/session.client.sent.reset/server"}) + public void shouldSessionStreamReceiveClientSentReset() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.server.sent.reset/client", + "${kafka}/session.server.sent.reset/server"}) + public void shouldSessionStreamReceiveServerSentReset() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.group.server.sent.reset/client", + "${kafka}/session.group.server.sent.reset/server"}) + public void shouldGroupStreamReceiveServerSentReset() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java index 034760c69d..65827e7622 100644 --- a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java +++ b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java @@ -403,4 +403,85 @@ public void shouldAcknowledgeSingleTopicFilter() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${mqtt}/session.abort.reconnect.non.clean.start/client", + "${mqtt}/session.abort.reconnect.non.clean.start/server"}) + public void shouldReconnectNonCleanStart() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.client.takeover/client", + "${mqtt}/session.client.takeover/server"}) + public void shouldTakeOverSession() throws Exception + { + k3po.finish(); + } + + @Test 
+ @Specification({ + "${mqtt}/session.exists.clean.start/client", + "${mqtt}/session.exists.clean.start/server"}) + public void shouldRemoveSessionAtCleanStart() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.subscribe/client", + "${mqtt}/session.subscribe/server"}) + public void shouldSubscribeSaveSubscriptionsInSession() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.subscribe.via.session.state/client", + "${mqtt}/session.subscribe.via.session.state/server"}) + public void shouldReceiveMessageSubscribedViaSessionState() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.unsubscribe.after.subscribe/client", + "${mqtt}/session.unsubscribe.after.subscribe/server"}) + public void shouldUnsubscribeAndUpdateSessionState() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.unsubscribe.via.session.state/client", + "${mqtt}/session.unsubscribe.via.session.state/server"}) + public void shouldUnsubscribeViaSessionState() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.client.sent.reset/client", + "${mqtt}/session.client.sent.reset/server"}) + public void shouldSessionStreamReceiveClientSentReset() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.server.sent.reset/client", + "${mqtt}/session.server.sent.reset/server"}) + public void shouldSessionStreamReceiveServerSentReset() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java index 72030e5d0c..440919173d 100644 --- 
a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java @@ -14,20 +14,31 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal; +import java.lang.reflect.Method; +import java.util.UUID; +import java.util.function.Supplier; + +import org.agrona.LangUtil; + import io.aklivity.zilla.runtime.engine.Configuration; public class MqttKafkaConfiguration extends Configuration { private static final ConfigurationDef MQTT_KAFKA_CONFIG; - public static final PropertyDef KAFKA_MESSAGES_TOPIC; - public static final PropertyDef KAFKA_RETAINED_MESSAGES_TOPIC; + public static final PropertyDef MESSAGES_TOPIC; + public static final PropertyDef RETAINED_MESSAGES_TOPIC; + public static final PropertyDef SESSIONS_TOPIC; + public static final PropertyDef SESSION_ID; static { final ConfigurationDef config = new ConfigurationDef("zilla.binding.mqtt.kafka"); - KAFKA_MESSAGES_TOPIC = config.property("messages.topic", "mqtt_messages"); - KAFKA_RETAINED_MESSAGES_TOPIC = config.property("retained.messages.topic", "mqtt_retained"); + MESSAGES_TOPIC = config.property("messages.topic", "mqtt_messages"); + RETAINED_MESSAGES_TOPIC = config.property("retained.messages.topic", "mqtt_retained"); + SESSIONS_TOPIC = config.property("sessions.topic", "mqtt_sessions"); + SESSION_ID = config.property(SessionIdSupplier.class, "session.id", + MqttKafkaConfiguration::decodeSessionIdSupplier, MqttKafkaConfiguration::defaultSessionIdSupplier); MQTT_KAFKA_CONFIG = config; } @@ -37,13 +48,59 @@ public MqttKafkaConfiguration( super(MQTT_KAFKA_CONFIG, config); } - public String messagesTopic() + public Supplier sessionIdSupplier() + { + return SESSION_ID.get(this); + } + + @FunctionalInterface + public interface SessionIdSupplier extends Supplier { - return KAFKA_MESSAGES_TOPIC.get(this); } - public 
String retainedMessagesTopic() + private static SessionIdSupplier decodeSessionIdSupplier( + Configuration config, + String value) + { + try + { + String className = value.substring(0, value.indexOf("$$Lambda")); + Class lambdaClass = Class.forName(className); + + Method targetMethod = null; + for (Method method : lambdaClass.getDeclaredMethods()) + { + if (method.isSynthetic()) + { + targetMethod = method; + break; + } + } + + Method finalTargetMethod = targetMethod; + return () -> + { + try + { + finalTargetMethod.setAccessible(true); + return (String) finalTargetMethod.invoke(null); + } + catch (Exception e) + { + throw new RuntimeException("Failed to invoke the lambda method.", e); + } + }; + } + catch (Throwable ex) + { + LangUtil.rethrowUnchecked(ex); + } + return null; + } + + private static SessionIdSupplier defaultSessionIdSupplier( + Configuration config) { - return KAFKA_RETAINED_MESSAGES_TOPIC.get(this); + return () -> String.format("%s-%s", "zilla", UUID.randomUUID()); } } diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java index d303424373..fb094cee35 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java @@ -37,7 +37,7 @@ public class MqttKafkaProxyFactory implements MqttKafkaStreamFactory private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); private final int mqttTypeId; - private final Int2ObjectHashMap factories; + private final Int2ObjectHashMap factories; private final Long2ObjectHashMap bindings; public MqttKafkaProxyFactory( @@ -45,7 +45,7 @@ public MqttKafkaProxyFactory( EngineContext 
context) { final Long2ObjectHashMap bindings = new Long2ObjectHashMap<>(); - final Int2ObjectHashMap factories = new Int2ObjectHashMap<>(); + final Int2ObjectHashMap factories = new Int2ObjectHashMap<>(); final MqttKafkaPublishFactory publishFactory = new MqttKafkaPublishFactory( config, context, bindings::get); @@ -53,12 +53,12 @@ public MqttKafkaProxyFactory( final MqttKafkaSubscribeFactory subscribeFactory = new MqttKafkaSubscribeFactory( config, context, bindings::get); - // final MqttKafkaSessionFactory sessionFactory = new MqttKafkaSessionFactory( - // config, context, bindings::get); + final MqttKafkaSessionFactory sessionFactory = new MqttKafkaSessionFactory( + config, context, bindings::get); factories.put(MqttBeginExFW.KIND_PUBLISH, publishFactory); factories.put(MqttBeginExFW.KIND_SUBSCRIBE, subscribeFactory); - // factories.put(MqttBeginExFW.KIND_SESSION, sessionFactory); + factories.put(MqttBeginExFW.KIND_SESSION, sessionFactory); this.mqttTypeId = context.supplyTypeId(MQTT_TYPE_NAME); this.factories = factories; @@ -71,6 +71,8 @@ public void attach( { MqttKafkaBindingConfig kafkaBinding = new MqttKafkaBindingConfig(binding); bindings.put(binding.id, kafkaBinding); + + factories.values().forEach(streamFactory -> streamFactory.onAttached(binding.id)); } @Override @@ -78,6 +80,8 @@ public void detach( long bindingId) { bindings.remove(bindingId); + + factories.values().forEach(streamFactory -> streamFactory.onDetached(bindingId)); } @Override @@ -93,7 +97,7 @@ public MessageConsumer newStream( final ExtensionFW beginEx = extension.get(extensionRO::tryWrap); assert beginEx != null; final int typeId = beginEx.typeId(); - assert beginEx != null && typeId == mqttTypeId; + assert typeId == mqttTypeId; MessageConsumer newStream = null; diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java 
b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java index 4bbe31ca92..c4e934ecf6 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java @@ -58,7 +58,7 @@ import io.aklivity.zilla.runtime.engine.binding.BindingHandler; import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; -public class MqttKafkaPublishFactory implements BindingHandler +public class MqttKafkaPublishFactory implements MqttKafkaStreamFactory { private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0); private static final KafkaAckMode KAFKA_DEFAULT_ACK_MODE = KafkaAckMode.LEADER_ONLY; @@ -520,23 +520,6 @@ private void doMqttBegin( traceId, authorization, affinity); } - private void doMqttData( - long traceId, - long authorization, - long budgetId, - int reserved, - int flags, - OctetsFW payload, - Flyweight extension) - { - doData(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, budgetId, flags, reserved, payload, extension); - - replySeq += reserved; - - assert replySeq <= replyAck + replyMax; - } - private void doMqttFlush( long traceId, long authorization, diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java new file mode 100644 index 0000000000..4b38221c86 --- /dev/null +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java @@ -0,0 +1,1646 @@ +/* + * Copyright 2021-2023 Aklivity 
Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream; + +import static java.time.Instant.now; +import static java.util.concurrent.TimeUnit.SECONDS; + +import java.util.function.LongFunction; +import java.util.function.LongUnaryOperator; +import java.util.function.Supplier; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.collections.Long2ObjectHashMap; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaAckMode; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaCapabilities; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaKeyFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaOffsetType; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttSessionStateFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; +import 
io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.FlushFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaGroupDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaMergedDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttBeginExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSessionBeginExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; + +public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory +{ + private static final KafkaAckMode KAFKA_DEFAULT_ACK_MODE = KafkaAckMode.LEADER_ONLY; + private static final String KAFKA_TYPE_NAME = "kafka"; + private static final String MIGRATE_KEY_POSTFIX = "#migrate"; + private static final String GROUP_PROTOCOL = "highlander"; + private static final String16FW SENDER_ID_NAME = new String16FW("sender-id"); + private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0); + private static final int DATA_FLAG_COMPLETE = 
0x03; + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final AbortFW abortRO = new AbortFW(); + private final FlushFW flushRO = new FlushFW(); + + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final FlushFW.Builder flushRW = new FlushFW.Builder(); + + private final WindowFW windowRO = new WindowFW(); + private final ResetFW resetRO = new ResetFW(); + + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + + private final ExtensionFW extensionRO = new ExtensionFW(); + private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); + private final MqttSessionStateFW mqttSessionStateRO = new MqttSessionStateFW(); + private final KafkaDataExFW kafkaDataExRO = new KafkaDataExFW(); + private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); + private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); + private final MutableDirectBuffer writeBuffer; + private final MutableDirectBuffer extBuffer; + private final BindingHandler streamFactory; + private final LongUnaryOperator supplyInitialId; + private final LongUnaryOperator supplyReplyId; + private final int kafkaTypeId; + private final LongFunction supplyBinding; + private final Supplier supplySessionId; + private final Long2ObjectHashMap sessionIds; + + public MqttKafkaSessionFactory( + MqttKafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding) + { + this.kafkaTypeId = context.supplyTypeId(KAFKA_TYPE_NAME); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.extBuffer = new UnsafeBuffer(new 
byte[context.writeBuffer().capacity()]); + this.streamFactory = context.streamFactory(); + this.supplyInitialId = context::supplyInitialId; + this.supplyReplyId = context::supplyReplyId; + this.supplyBinding = supplyBinding; + this.supplySessionId = config.sessionIdSupplier(); + this.sessionIds = new Long2ObjectHashMap<>(); + } + + @Override + public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer mqtt) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long authorization = begin.authorization(); + + final MqttKafkaBindingConfig binding = supplyBinding.apply(routedId); + + final MqttKafkaRouteConfig resolved = binding != null ? binding.resolve(authorization) : null; + + MessageConsumer newStream = null; + + if (resolved != null) + { + final long resolvedId = resolved.id; + final String16FW sessionTopic = binding.sessionsTopic(); + newStream = new MqttSessionProxy(mqtt, originId, routedId, initialId, resolvedId, + binding.id, sessionTopic)::onMqttMessage; + } + + return newStream; + } + + @Override + public void onAttached( + long bindingId) + { + sessionIds.put(bindingId, supplySessionId.get()); + } + + @Override + public void onDetached( + long bindingId) + { + sessionIds.remove(bindingId); + } + + private final class MqttSessionProxy + { + private final MessageConsumer mqtt; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final KafkaGroupProxy group; + private final String16FW sessionId; + private final String16FW sessionsTopic; + + + private KafkaSessionProxy session; + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int 
replyPad; + + private String16FW clientId; + private String16FW clientIdMigrate; + private int sessionExpiryMillis; + + private MqttSessionProxy( + MessageConsumer mqtt, + long originId, + long routedId, + long initialId, + long resolvedId, + long bindingId, + String16FW sessionsTopic) + { + this.mqtt = mqtt; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.session = new KafkaSessionSignalProxy(originId, resolvedId, this); + this.group = new KafkaGroupProxy(originId, resolvedId, this); + this.sessionsTopic = sessionsTopic; + this.sessionId = new String16FW(sessionIds.get(bindingId)); + } + + private void onMqttMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onMqttBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onMqttData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onMqttEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onMqttAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onMqttReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onMqttWindow(window); + break; + } + } + + private void onMqttBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + initialAck = acknowledge; + 
state = MqttKafkaState.openingInitial(state); + + assert initialAck <= initialSeq; + + final OctetsFW extension = begin.extension(); + final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); + + assert mqttBeginEx.kind() == MqttBeginExFW.KIND_SESSION; + final MqttSessionBeginExFW mqttSessionBeginEx = mqttBeginEx.session(); + + final String clientId0 = mqttSessionBeginEx.clientId().asString(); + this.clientId = new String16FW(clientId0); + this.clientIdMigrate = new String16FW(clientId0 + MIGRATE_KEY_POSTFIX); + + final int sessionExpiry = mqttSessionBeginEx.expiry(); + sessionExpiryMillis = mqttSessionBeginEx.expiry() == 0 ? Integer.MAX_VALUE : (int) SECONDS.toMillis(sessionExpiry); + session.doKafkaBeginIfNecessary(traceId, authorization, affinity, null, clientIdMigrate, sessionId); + group.doKafkaBegin(traceId, authorization, affinity); + } + + private void onMqttData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + final int flags = data.flags(); + final OctetsFW payload = data.payload(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + + assert initialAck <= initialSeq; + + final DirectBuffer buffer = payload.buffer(); + final int offset = payload.offset(); + final int limit = payload.limit(); + + MqttSessionStateFW sessionState = mqttSessionStateRO.tryWrap(buffer, offset, limit); + + Flyweight kafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(clientId.length()) + .value(clientId.value(), 0, clientId.length()))) + .build(); + + if (sessionState != null) + { + 
session.doKafkaData(traceId, authorization, budgetId, reserved, flags, sessionState, kafkaDataEx); + } + else + { + session.doKafkaData(traceId, authorization, budgetId, reserved, flags, EMPTY_OCTETS, kafkaDataEx); + } + } + + + private void onMqttEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = MqttKafkaState.closeInitial(state); + + assert initialAck <= initialSeq; + + session.doKafkaEnd(traceId, initialSeq, authorization); + group.doKafkaEnd(traceId, initialSeq, authorization); + } + + private void onMqttAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = MqttKafkaState.closeInitial(state); + + assert initialAck <= initialSeq; + + session.doKafkaAbort(traceId, authorization); + group.doKafkaAbort(traceId, authorization); + } + + private void onMqttReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final int maximum = reset.maximum(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + session.doKafkaReset(traceId); + group.doKafkaReset(traceId); + } + + private void onMqttWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final 
long traceId = window.traceId(); + final long authorization = window.authorization(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + replyPad = padding; + state = MqttKafkaState.openReply(state); + + assert replyAck <= replySeq; + + session.doKafkaWindow(traceId, authorization, budgetId, capabilities); + if (sequence == 0) + { + group.doKafkaWindow(traceId, authorization, budgetId, padding, capabilities); + } + } + + private void doMqttBegin( + long traceId, + long authorization, + long affinity) + { + if (!MqttKafkaState.replyOpening(state)) + { + replySeq = session.replySeq; + replyAck = session.replyAck; + replyMax = session.replyMax; + state = MqttKafkaState.openingReply(state); + + doBegin(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity); + } + } + + private void doMqttData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + MqttSessionStateFW sessionState) + { + Flyweight state = sessionState == null ? 
EMPTY_OCTETS : sessionState; + final DirectBuffer buffer = state.buffer(); + final int offset = state.offset(); + final int limit = state.limit(); + final int length = limit - offset; + + doData(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, flags, reserved, buffer, offset, length, EMPTY_OCTETS); + + replySeq += reserved; + + assert replySeq <= replyAck + replyMax; + } + + private void doMqttData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload) + { + doData(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, flags, reserved, payload, EMPTY_OCTETS); + + replySeq += reserved; + + assert replySeq <= replyAck + replyMax; + } + + private void doMqttAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.replyClosed(state)) + { + replySeq = session.replySeq; + state = MqttKafkaState.closeReply(state); + + doAbort(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization); + } + } + + private void doMqttEnd( + long traceId, + long authorization) + { + if (!MqttKafkaState.replyClosed(state)) + { + replySeq = session.replySeq; + state = MqttKafkaState.closeReply(state); + + doEnd(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization); + } + } + + private void doMqttWindow( + long authorization, + long traceId, + long budgetId, + int padding, + int capabilities) + { + initialAck = session.initialAck; + initialMax = session.initialMax; + + doWindow(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, padding, 0, capabilities); + } + + private void doMqttReset( + long traceId) + { + if (!MqttKafkaState.initialClosed(state)) + { + state = MqttKafkaState.closeInitial(state); + + doReset(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId); + } + } + } + + 
private abstract class KafkaSessionProxy + { + protected MessageConsumer kafka; + protected final long originId; + protected final long routedId; + protected long initialId; + protected long replyId; + protected final MqttSessionProxy delegate; + + protected int state; + + protected long initialSeq; + protected long initialAck; + protected int initialMax; + + protected long replySeq; + protected long replyAck; + protected int replyMax; + protected int replyPad; + + private KafkaSessionProxy( + long originId, + long routedId, + MqttSessionProxy delegate) + { + this.originId = originId; + this.routedId = routedId; + this.delegate = delegate; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + } + + private void doKafkaBeginIfNecessary( + long traceId, + long authorization, + long affinity, + String16FW clientId, + String16FW clientIdMigrate, + String16FW sessionIdentifier) + { + if (!MqttKafkaState.initialOpening(state)) + { + doKafkaBegin(traceId, authorization, affinity, clientId, clientIdMigrate, sessionIdentifier); + } + } + + private void doKafkaBegin( + long traceId, + long authorization, + long affinity, + String16FW clientId, + String16FW clientIdMigrate, + String16FW sessionIdentifier) + { + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + + state = MqttKafkaState.openingInitial(state); + + kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, delegate.sessionsTopic, clientId, clientIdMigrate, sessionIdentifier); + } + + private void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, 
extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + MqttSessionStateFW sessionState, + Flyweight extension) + { + final DirectBuffer buffer = sessionState.buffer(); + final int offset = sessionState.offset(); + final int limit = sessionState.limit(); + final int length = limit - offset; + + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, buffer, offset, length, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doKafkaEnd( + long traceId, + long sequence, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void onKafkaMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onKafkaData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onKafkaEnd(end); + break; + case 
AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onKafkaAbort(abort); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onKafkaFlush(flush); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onKafkaWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onKafkaReset(reset); + break; + } + } + + private void onKafkaBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + assert acknowledge >= replyAck; + + replySeq = sequence; + replyAck = acknowledge; + replyMax = maximum; + state = MqttKafkaState.openingReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttBegin(traceId, authorization, affinity); + doKafkaWindow(traceId, authorization, 0, 0); + } + + private void onKafkaData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final int reserved = data.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + + if (replySeq > replyAck + replyMax) + { + doKafkaReset(traceId); + delegate.doMqttAbort(traceId, authorization); + } + else + { + handleKafkaData(data); + } + } + + protected abstract void handleKafkaData(DataFW data); + + protected abstract void onKafkaWindow(WindowFW window); + + protected void onKafkaEnd( + EndFW end) + { + } + + protected void onKafkaFlush( + FlushFW flush) + { + } + + private void 
onKafkaAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttAbort(traceId, authorization); + } + + protected void sendMigrateSignal(long authorization, long traceId) + { + Flyweight kafkaMigrateDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(delegate.clientIdMigrate.length()) + .value(delegate.clientIdMigrate.value(), 0, delegate.clientIdMigrate.length())) + .headersItem(c -> c.nameLen(SENDER_ID_NAME.length()) + .name(SENDER_ID_NAME.value(), 0, SENDER_ID_NAME.length()) + .valueLen(delegate.sessionId.length()) + .value(delegate.sessionId.value(), 0, delegate.sessionId.length()))) + .build(); + + doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, + EMPTY_OCTETS, kafkaMigrateDataEx); + } + + private void onKafkaReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + + delegate.initialAck = acknowledge; + + assert delegate.initialAck <= delegate.initialSeq; + + delegate.doMqttReset(traceId); + } + + private void doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int capabilities) + { + replyAck = 
delegate.replyAck; + replyMax = delegate.replyMax; + replyPad = delegate.replyPad; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, replyPad, 0, capabilities); + } + } + + private final class KafkaSessionSignalProxy extends KafkaSessionProxy + { + private KafkaSessionSignalProxy( + long originId, + long routedId, + MqttSessionProxy delegate) + { + super(originId, routedId, delegate); + } + + @Override + protected void handleKafkaData(DataFW data) + { + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + + final OctetsFW extension = data.extension(); + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final KafkaDataExFW kafkaDataEx = + dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; + final KafkaMergedDataExFW kafkaMergedDataEx = + kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; + final KafkaKeyFW key = kafkaMergedDataEx != null ? 
kafkaMergedDataEx.key() : null; + + if (key != null) + { + delegate.group.doKafkaFlush(traceId, authorization, budgetId, reserved); + } + } + + @Override + protected void onKafkaWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final boolean wasOpen = MqttKafkaState.initialOpened(state); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + assert maximum >= delegate.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + state = MqttKafkaState.openInitial(state); + + assert initialAck <= initialSeq; + + if (!wasOpen) + { + sendMigrateSignal(authorization, traceId); + } + } + } + + private final class KafkaSessionStateProxy extends KafkaSessionProxy + { + private KafkaSessionStateProxy( + long originId, + long routedId, + MqttSessionProxy delegate) + { + super(originId, routedId, delegate); + } + + @Override + protected void handleKafkaData(DataFW data) + { + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + + final int flags = data.flags(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final KafkaDataExFW kafkaDataEx = + dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; + final KafkaMergedDataExFW kafkaMergedDataEx = + kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; + final KafkaKeyFW key = kafkaMergedDataEx != null ? 
kafkaMergedDataEx.key() : null; + + if (key != null) + { + if (key.length() == (delegate.clientId.length())) + { + MqttSessionStateFW sessionState = + mqttSessionStateRO.tryWrap(payload.buffer(), payload.offset(), payload.limit()); + delegate.doMqttData(traceId, authorization, budgetId, reserved, flags, sessionState); + } + else if (key.length() == delegate.clientIdMigrate.length()) + { + delegate.group.doKafkaFlush(traceId, authorization, budgetId, reserved); + } + } + } + + @Override + protected void onKafkaWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + assert maximum >= delegate.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + state = MqttKafkaState.openInitial(state); + + assert initialAck <= initialSeq; + + delegate.doMqttWindow(authorization, traceId, budgetId, padding, capabilities); + } + + @Override + protected void onKafkaFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); + final long budgetId = flush.budgetId(); + final int reserved = flush.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + + assert replyAck <= replySeq; + + delegate.doMqttData(traceId, authorization, budgetId, reserved, DATA_FLAG_COMPLETE, EMPTY_OCTETS); + } + + @Override + protected void onKafkaEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + 
final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttEnd(traceId, authorization); + } + } + + private final class KafkaGroupProxy + { + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final MqttSessionProxy delegate; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaGroupProxy( + long originId, + long routedId, + MqttSessionProxy delegate) + { + this.originId = originId; + this.routedId = routedId; + this.delegate = delegate; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + } + + private void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.openingInitial(state); + + kafka = newGroupStream(this::onGroupMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, delegate.clientId, delegate.sessionExpiryMillis); + } + + private void doKafkaFlush( + long traceId, + long authorization, + long budgetId, + int reserved) + { + initialSeq = delegate.initialSeq; + + doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved); + } + + private void doKafkaEnd( + long traceId, + long sequence, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + 
state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void onGroupMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onKafkaData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onKafkaEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onKafkaAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onKafkaReset(reset); + break; + } + } + + private void onKafkaBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + assert acknowledge >= replyAck; + + replySeq = sequence; + replyAck = acknowledge; + replyMax = maximum; + state = MqttKafkaState.openingReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttBegin(traceId, authorization, affinity); + } + + private void onKafkaData( + DataFW data) + { + final long 
sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + if (replySeq > replyAck + replyMax) + { + doKafkaReset(traceId); + delegate.doMqttAbort(traceId, authorization); + } + else + { + final OctetsFW extension = data.extension(); + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final KafkaDataExFW kafkaDataEx = + dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; + final KafkaGroupDataExFW kafkaGroupDataEx = + kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_GROUP ? kafkaDataEx.group() : null; + final String16FW leaderId = kafkaGroupDataEx != null ? kafkaGroupDataEx.leaderId() : null; + final String16FW memberId = kafkaGroupDataEx != null ? kafkaGroupDataEx.memberId() : null; + final int members = kafkaGroupDataEx != null ? 
kafkaGroupDataEx.members() : 0; + + if (leaderId.equals(memberId)) + { + if (members > 1) + { + delegate.session.sendMigrateSignal(authorization, traceId); + doKafkaEnd(traceId, sequence, authorization); + } + else + { + delegate.session.doKafkaEnd(traceId, sequence, authorization); + final long routedId = delegate.session.routedId; + delegate.session = new KafkaSessionStateProxy(originId, routedId, delegate); + delegate.session.doKafkaBeginIfNecessary(traceId, authorization, 0, + delegate.clientId, delegate.clientIdMigrate, delegate.sessionId); + } + } + } + } + + private void onKafkaEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttEnd(traceId, authorization); + } + + private void onKafkaAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttAbort(traceId, authorization); + } + + private void onKafkaReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + + delegate.initialAck = acknowledge; + + assert delegate.initialAck <= delegate.initialSeq; + + delegate.doMqttReset(traceId); + } + + private void doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = 
MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + replyAck = delegate.replyAck; + replyMax = delegate.replyMax; + replyPad = delegate.replyPad; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, replyPad, capabilities); + } + } + + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int flags, + int reserved, + OctetsFW payload, + Flyweight extension) + { + final DataFW frame = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); + } + private void doData( + MessageConsumer receiver, + long originId, + long 
routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int flags, + int reserved, + DirectBuffer buffer, + int index, + int length, + Flyweight extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(buffer, index, length) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .build(); + + receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doFlush( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long 
sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved) + { + final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .build(); + + receiver.accept(flush.typeId(), flush.buffer(), flush.offset(), flush.sizeof()); + } + + private MessageConsumer newKafkaStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + String16FW sessionsTopicName, + String16FW clientId, + String16FW clientIdMigrate, + String16FW sessionId) + { + final KafkaBeginExFW kafkaBeginEx = + kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> + { + m.capabilities(c -> c.set(KafkaCapabilities.PRODUCE_AND_FETCH)); + m.topic(sessionsTopicName); + if (clientId != null) + { + m.partitionsItem(p -> + p.partitionId(KafkaOffsetType.HISTORICAL.value()) + .partitionOffset(KafkaOffsetType.HISTORICAL.value())); + m.filtersItem(f -> f.conditionsItem(ci -> + ci.key(kb -> kb.length(clientId.length()) + .value(clientId.value(), 0, clientId.length())))); + } + m.filtersItem(f -> + { + f.conditionsItem(ci -> + ci.key(kb -> kb.length(clientIdMigrate.length()) + .value(clientIdMigrate.value(), 0, clientIdMigrate.length()))); + f.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> + h.nameLen(SENDER_ID_NAME.length()) + .name(SENDER_ID_NAME.value(), 0, SENDER_ID_NAME.length()) + .valueLen(sessionId.length()) + .value(sessionId.value(), 0, sessionId.length()))))); + }); + m.ackMode(b -> b.set(KAFKA_DEFAULT_ACK_MODE)); + }) + .build(); + + + final BeginFW begin = beginRW.wrap(writeBuffer, 0, 
writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(kafkaBeginEx.buffer(), kafkaBeginEx.offset(), kafkaBeginEx.sizeof()) + .build(); + + MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private MessageConsumer newGroupStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + String16FW clientId, + int sessionExpiryMs) + { + final KafkaBeginExFW kafkaBeginEx = + kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .group(g -> g.groupId(clientId).protocol(GROUP_PROTOCOL).timeout(sessionExpiryMs)) + .build(); + + + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(kafkaBeginEx.buffer(), kafkaBeginEx.offset(), kafkaBeginEx.sizeof()) + .build(); + + MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private void doWindow( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding, + int minimum, + int capabilities) + { + final 
WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .minimum(minimum) + .capabilities(capabilities) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } +} diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaStreamFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaStreamFactory.java index 73aa200e57..6aaf100848 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaStreamFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaStreamFactory.java @@ -19,9 +19,23 @@ public interface MqttKafkaStreamFactory extends BindingHandler { - void attach( - BindingConfig binding); + default void attach( + BindingConfig binding) + { + } - void detach( - long bindingId); + default void detach( + long bindingId) + { + } + + default void onAttached( + long binding) + { + } + + default void onDetached( + long bindingId) + { + } } diff --git 
a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java index 5c0d00e5da..36bcbfe998 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java @@ -73,7 +73,7 @@ import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; import io.aklivity.zilla.runtime.engine.buffer.BufferPool; -public class MqttKafkaSubscribeFactory implements BindingHandler +public class MqttKafkaSubscribeFactory implements MqttKafkaStreamFactory { private static final String MQTT_TYPE_NAME = "mqtt"; private static final String KAFKA_TYPE_NAME = "kafka"; diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java index e059806edd..c1b2c181c2 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java @@ -15,21 +15,21 @@ package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal; -import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.KAFKA_MESSAGES_TOPIC; -import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.KAFKA_RETAINED_MESSAGES_TOPIC; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.MESSAGES_TOPIC; 
+import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.RETAINED_MESSAGES_TOPIC; import static org.junit.Assert.assertEquals; import org.junit.Test; public class MqttKafkaConfigurationTest { - public static final String KAFKA_MESSAGES_TOPIC_NAME = "zilla.binding.mqtt.kafka.messages.topic"; - public static final String KAFKA_RETAINED_MESSAGES_TOPIC_NAME = "zilla.binding.mqtt.kafka.retained.messages.topic"; + public static final String MESSAGES_TOPIC_NAME = "zilla.binding.mqtt.kafka.messages.topic"; + public static final String RETAINED_MESSAGES_TOPIC_NAME = "zilla.binding.mqtt.kafka.retained.messages.topic"; @Test public void shouldVerifyConstants() { - assertEquals(KAFKA_MESSAGES_TOPIC.name(), KAFKA_MESSAGES_TOPIC_NAME); - assertEquals(KAFKA_RETAINED_MESSAGES_TOPIC.name(), KAFKA_RETAINED_MESSAGES_TOPIC_NAME); + assertEquals(MESSAGES_TOPIC.name(), MESSAGES_TOPIC_NAME); + assertEquals(RETAINED_MESSAGES_TOPIC.name(), RETAINED_MESSAGES_TOPIC_NAME); } } diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java new file mode 100644 index 0000000000..438cc4d612 --- /dev/null +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java @@ -0,0 +1,154 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream; + +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.SESSION_ID; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_BUFFER_SLOT_CAPACITY; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class MqttKafkaSessionProxyIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("mqtt", "io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt") + .addScriptRoot("kafka", "io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(10, SECONDS)); + + public final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) + .configure(SESSION_ID, () -> "sender-1") + .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/kafka/config") + .external("kafka0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/session.abort.reconnect.non.clean.start/client", + "${kafka}/session.abort.reconnect.non.clean.start/server"}) + public void shouldReconnectNonCleanStart() throws Exception + { + k3po.finish(); + } + + @Test + 
@Configuration("proxy.yaml") + @Specification({ + "${mqtt}/session.client.takeover/client", + "${kafka}/session.client.takeover/server"}) + public void shouldTakeOverSession() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/session.exists.clean.start/client", + "${kafka}/session.exists.clean.start/server"}) + public void shouldRemoveSessionAtCleanStart() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/session.subscribe/client", + "${kafka}/session.subscribe/server"}) + public void shouldSubscribeSaveSubscriptionsInSession() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/session.subscribe.via.session.state/client", + "${kafka}/session.subscribe.via.session.state/server"}) + public void shouldReceiveMessageSubscribedViaSessionState() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/session.unsubscribe.after.subscribe/client", + "${kafka}/session.unsubscribe.after.subscribe/server"}) + public void shouldUnsubscribeAndUpdateSessionState() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/session.unsubscribe.via.session.state/client", + "${kafka}/session.unsubscribe.via.session.state/server"}) + public void shouldUnsubscribeViaSessionState() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/session.client.sent.reset/client", + "${kafka}/session.client.sent.reset/server"}) + public void shouldSessionStreamReceiveClientSentReset() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/session.server.sent.reset/client", + "${kafka}/session.server.sent.reset/server"}) + public void shouldSessionStreamReceiveServerSentReset() throws 
Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/session.server.sent.reset/client", + "${kafka}/session.group.server.sent.reset/server"}) + public void shouldGroupStreamReceiveServerSentReset() throws Exception + { + k3po.finish(); + } +} diff --git a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java index 221e865081..697c73aa40 100644 --- a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java +++ b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java @@ -688,13 +688,6 @@ private MqttSessionStateBuilder() sessionStateRW.wrap(writeBuffer, 0, writeBuffer.capacity()); } - public MqttSessionStateBuilder clientId( - String clientId) - { - sessionStateRW.clientId(clientId); - return this; - } - public MqttSessionStateBuilder subscription( String pattern) { diff --git a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl index 542414acc6..b2e51a0efb 100644 --- a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl +++ b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl @@ -77,7 +77,7 @@ scope mqtt struct MqttSessionState { - string16 clientId; + uint8 version = 1; MqttTopicFilter[] subscriptions; } diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt index a5432b0a7d..72c682e759 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt @@ -30,12 +30,10 @@ connected read zilla:data.empty write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} @@ -77,7 +75,6 @@ write zilla:begin.ext ${mqtt:beginEx() connected read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt index 81c799aa72..8a8d7a410a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt @@ -33,12 +33,10 @@ write zilla:data.empty write flush read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write flush @@ -72,7 +70,6 @@ read zilla:begin.ext ${mqtt:matchBeginEx() connected write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt index d79c901aa3..77f2824ab1 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt @@ -28,12 +28,10 @@ write zilla:begin.ext ${mqtt:beginEx() connected write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} @@ -78,7 +76,6 @@ write zilla:begin.ext ${mqtt:beginEx() connected read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt index 81dc1dedde..42a990e7c1 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt @@ -33,12 +33,10 @@ write zilla:data.empty write flush read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write flush @@ -78,7 +76,6 @@ connected write notify CLIENT_TAKEOVER write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write flush diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt index 26a9a80ed1..1e1c2aed10 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt @@ -30,13 +30,11 @@ connected read zilla:data.empty write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write flush read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} @@ -81,7 +79,6 @@ write zilla:begin.ext ${mqtt:beginEx() connected read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt index 026b97886b..07a9968ea5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt @@ -33,12 +33,10 @@ write zilla:data.empty write flush read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write flush @@ -75,7 +73,6 @@ read zilla:begin.ext ${mqtt:matchBeginEx() connected write ${mqtt:session() - .clientId("client") 
.subscription("sensor/one", 1) .build()} write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt new file mode 100644 index 0000000000..bf293f83a1 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt @@ -0,0 +1,53 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1) + .build() + .build()} + +connected diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt new file mode 100644 index 0000000000..f03fc943aa --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt @@ -0,0 +1,32 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client") + .build() + .build()} + +connected + +write abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt new file mode 100644 index 0000000000..22d2e6529e --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt @@ -0,0 +1,72 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 2) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 2) + .build()} + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1) + .filter("sensor/two", 2) + .build() + .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt new file mode 100644 index 0000000000..1f43fea32d --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt @@ -0,0 +1,75 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 2) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 2) + .build()} +write flush + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1) + .build() + .build()} + +connected + + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1) + .filter("sensor/two", 2) + .build() + .build()} + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt index 9ea474d72b..998bf4e10f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt @@ -30,7 +30,6 @@ connected read zilla:data.empty read ${mqtt:session() - .clientId("client") .subscription("sensor/one") .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt index e24c364374..faab527cfa 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt @@ -33,7 +33,6 @@ write zilla:data.empty write flush write ${mqtt:session() - .clientId("client") .subscription("sensor/one") .build()} write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt index ac18737ae1..bf293f83a1 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt @@ -30,13 +30,11 @@ connected read zilla:data.empty write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write flush read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt index b40a5ad9a5..0e78132b91 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt @@ -33,12 +33,10 @@ write zilla:data.empty write flush read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt index 781d702696..200d21fc64 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt @@ -30,24 +30,20 @@ connected read zilla:data.empty write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .subscription("sensor/two", 1) .build()} write notify UNSUBSCRIBE_ALL_FILTERS read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .subscription("sensor/two", 1) .build()} write ${mqtt:session() - .clientId("client") .build()} read ${mqtt:session() - .clientId("client") .build()} write notify SESSION_FINISHED diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt index 113ba9999a..6243dda9e3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt @@ -32,25 +32,21 @@ write zilla:data.empty write flush read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .subscription("sensor/two", 1) .build()} read await UNSUBSCRIBE_ALL_FILTERS write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .subscription("sensor/two", 1) .build()} write flush read ${mqtt:session() - .clientId("client") .build()} write ${mqtt:session() - .clientId("client") .build()} write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt index e9c72a820c..a6cfebd9c9 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt @@ -30,21 +30,17 @@ connected read zilla:data.empty write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} read ${mqtt:session() - .clientId("client") 
.subscription("sensor/one", 1) .build()} write ${mqtt:session() - .clientId("client") .build()} read ${mqtt:session() - .clientId("client") .build()} write notify SESSION_READY diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt index 9d0e5a0525..8b8acf889c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt @@ -34,22 +34,18 @@ write zilla:data.empty write flush read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write flush read ${mqtt:session() - .clientId("client") .build()} write ${mqtt:session() - .clientId("client") .build()} write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt index fb769fed46..e7af77b53c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt @@ -31,19 +31,16 @@ connected read zilla:data.empty write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} 
read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write notify SUBSCRIBED read ${mqtt:session() - .clientId("client") .build()} write notify SESSION_READY diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt index cf0094889c..87f98db3e8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt @@ -34,19 +34,16 @@ write zilla:data.empty write flush read ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write ${mqtt:session() - .clientId("client") .subscription("sensor/one", 1) .build()} write flush read await SUBSCRIBED write ${mqtt:session() - .clientId("client") .build()} write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt new file mode 100644 index 0000000000..6c4ad76ad8 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt @@ -0,0 +1,59 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +read [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties = none + +write [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +read [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +write [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x02] # subscription id = 2 + [0x00 0x0a] "sensor/two" # topic filter + [0x20] # options = at-most-once + +read [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt new file mode 100644 index 0000000000..ec1a3928af --- /dev/null +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt @@ -0,0 +1,60 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +write [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x00] # properties = none + +read [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +read [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x02] # subscription id = 2 + [0x00 0x0a] "sensor/two" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason codes diff --git 
a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java index 6305e0ec73..44f2173d67 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java @@ -1272,7 +1272,6 @@ public void shouldEncodeMqttResetEx() public void shouldEncodeMqttSessionState() { final byte[] array = MqttFunctions.session() - .clientId("client") .subscription("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") .subscription("sensor/two") .build(); @@ -1280,7 +1279,6 @@ public void shouldEncodeMqttSessionState() DirectBuffer buffer = new UnsafeBuffer(array); MqttSessionStateFW sessionState = new MqttSessionStateFW().wrap(buffer, 0, buffer.capacity()); - assertEquals("client", sessionState.clientId().asString()); assertNotNull(sessionState.subscriptions() .matchFirst(f -> "sensor/one".equals(f.pattern().asString()) && diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java index 88446388e7..1a2e6ef51c 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java @@ -172,4 +172,13 @@ public void shouldRedirectAfterConnack() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${app}/session.subscribe.multiple.isolated/client", + "${app}/session.subscribe.multiple.isolated/server"}) + public void shouldSubscribeMultipleSaveSubscriptionsInSession() throws Exception + { + 
k3po.finish(); + } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java index 8f95ded58b..a3dd248b41 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java @@ -164,4 +164,13 @@ public void shouldRedirectAfterConnack() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${net}/session.subscribe.multiple.isolated/client", + "${net}/session.subscribe.multiple.isolated/server"}) + public void shouldSubscribeMultipleSaveSubscriptionsInSession() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 91b326a2dd..4b4ead716e 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -597,43 +597,6 @@ private void doData( receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); } - private void doData( - MessageConsumer receiver, - long originId, - long routedId, - long streamId, - long sequence, - long acknowledge, - int maximum, - long traceId, - long authorization, - long budgetId, - int reserved, - int flags, - DirectBuffer buffer, - int index, - int length, - Flyweight extension) - { - final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) - .originId(originId) - .routedId(routedId) - .streamId(streamId) - .sequence(sequence) - 
.acknowledge(acknowledge) - .maximum(maximum) - .traceId(traceId) - .authorization(authorization) - .flags(flags) - .budgetId(budgetId) - .reserved(reserved) - .payload(buffer, index, length) - .extension(extension.buffer(), extension.offset(), extension.sizeof()) - .build(); - - receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); - } - private void doEnd( MessageConsumer receiver, long originId, @@ -1982,29 +1945,30 @@ private void onDecodeSubscribe( if (session) { - final MqttSessionStateFW.Builder sessionStateBuilder = - mqttSessionStateFW.wrap(sessionStateBuffer, 0, sessionStateBuffer.capacity()) - .clientId(clientId); + final MqttSessionStateFW.Builder state = + mqttSessionStateFW.wrap(sessionStateBuffer, 0, sessionStateBuffer.capacity()); + + sessionStream.unAckedSubscriptions.addAll(newSubscriptions); + sessionStream.subscriptions.forEach(sub -> + state.subscriptionsItem(subscriptionBuilder -> + subscriptionBuilder + .subscriptionId(sub.id) + .flags(sub.flags) + .pattern(sub.filter)) + ); - newSubscriptions.forEach(subscription -> - { - sessionStateBuilder.subscriptionsItem(subscriptionBuilder -> - { - subscriptionBuilder.subscriptionId(subscription.id); - subscriptionBuilder.flags(subscription.flags); - subscriptionBuilder.pattern(subscription.filter); - }); - sessionStream.unAckedSubscriptions.add(subscription); - } + newSubscriptions.forEach(sub -> + state.subscriptionsItem(subscriptionBuilder -> + subscriptionBuilder + .subscriptionId(sub.id) + .flags(sub.flags) + .pattern(sub.filter)) ); - final MqttSessionStateFW sessionState = sessionStateBuilder.build(); + final MqttSessionStateFW sessionState = state.build(); final int payloadSize = sessionState.sizeof(); - //TODO: is this correct? What is this? 
- int reserved = payloadSize; - - sessionStream.doSessionData(traceId, reserved, sessionState); + sessionStream.doSessionData(traceId, payloadSize, sessionState); } else { @@ -2044,7 +2008,6 @@ private void openSubscribeStreams( stream.packetId = packetId; stream.doSubscribeBeginOrFlush(traceId, affinity, subscribeKey, value); }); - } private void onDecodeUnsubscribe( @@ -2058,12 +2021,6 @@ private void onDecodeUnsubscribe( final int decodeLimit = decodePayload.limit(); final int offset = decodePayload.offset(); - final MutableDirectBuffer encodeBuffer = payloadBuffer; - final int encodeOffset = 0; - final int encodeLimit = payloadBuffer.capacity(); - - int encodeProgress = encodeOffset; - int decodeReasonCode = SUCCESS; final List topicFilters = new ArrayList<>(); @@ -2107,7 +2064,7 @@ private void onDecodeUnsubscribe( return; } topicFilters.forEach(filter -> unsubscribePacketIds.put(filter, packetId)); - sendNewSessionStateForUnsubscribe(traceId, authorization, topicFilters); + doSendSessionState(traceId, topicFilters); } else { @@ -2117,19 +2074,17 @@ private void onDecodeUnsubscribe( } } - private void sendNewSessionStateForUnsubscribe( + private void doSendSessionState( long traceId, - long authorization, List topicFilters) { - List currentState = sessionStream.getSubscriptions(); + List currentState = sessionStream.subscriptions(); List newState = currentState.stream() .filter(subscription -> !topicFilters.contains(subscription.filter)) .collect(Collectors.toList()); final MqttSessionStateFW.Builder sessionStateBuilder = - mqttSessionStateFW.wrap(sessionStateBuffer, 0, sessionStateBuffer.capacity()) - .clientId(clientId); + mqttSessionStateFW.wrap(sessionStateBuffer, 0, sessionStateBuffer.capacity()); newState.forEach(subscription -> sessionStateBuilder.subscriptionsItem(subscriptionBuilder -> @@ -2143,10 +2098,7 @@ private void sendNewSessionStateForUnsubscribe( final MqttSessionStateFW sessionState = sessionStateBuilder.build(); final int payloadSize = 
sessionState.sizeof(); - //TODO: is this correct? What is this? - int reserved = payloadSize; - - sessionStream.doSessionData(traceId, reserved, sessionState); + sessionStream.doSessionData(traceId, payloadSize, sessionState); } private void sendUnsuback( @@ -3109,23 +3061,26 @@ private void onSessionReset( final OctetsFW extension = reset.extension(); final MqttResetExFW mqttResetEx = extension.get(mqttResetExRO::tryWrap); - String16FW serverReference = mqttResetEx.serverReference(); - byte reasonCode = SUCCESS; - if (serverReference != null && serverReference.length() != 0) - { - reasonCode = SERVER_MOVED; - } - if (!connected) - { - doCancelConnectTimeout(); - doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, serverReference); - } - else + + if (mqttResetEx != null) { - doEncodeDisconnect(traceId, authorization, reasonCode, serverReference); - } + String16FW serverReference = mqttResetEx.serverReference(); + boolean serverReferenceExists = serverReference != null; + + byte reasonCode = serverReferenceExists ? SERVER_MOVED : SESSION_TAKEN_OVER; + if (!connected) + { + doCancelConnectTimeout(); + doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, + false, serverReference); + } + else + { + doEncodeDisconnect(traceId, authorization, reasonCode, serverReferenceExists ? 
serverReference : null); + } + } setInitialClosed(); decodeNetwork(traceId); @@ -3230,7 +3185,7 @@ private void onSessionData( subscription.flags = filter.flags(); newState.add(subscription); }); - List currentSubscriptions = sessionStream.getSubscriptions(); + List currentSubscriptions = sessionStream.subscriptions(); if (newState.size() > currentSubscriptions.size()) { List newSubscriptions = newState.stream() @@ -3312,7 +3267,6 @@ private void doSessionBegin( assert state == 0; state = MqttState.openingInitial(state); - application = newStream(this::onSession, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, sessionId, affinity, beginEx); @@ -3333,6 +3287,8 @@ private void doSessionData( final int length = limit - offset; assert reserved >= length + initialPad; + reserved += initialPad; + if (!MqttState.closed(state)) { doData(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, @@ -3374,11 +3330,8 @@ private void doSessionAppEnd( { if (MqttState.initialOpening(state) && !MqttState.initialClosed(state)) { - setReplyClosed(); - doEnd(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, sessionId, extension); - sessionStream = null; } } @@ -3421,11 +3374,6 @@ private void setReplyClosed() assert !MqttState.replyClosed(state); state = MqttState.closeReply(state); - - if (MqttState.closed(state)) - { - sessionStream = null; - } } private void setInitialClosed() @@ -3439,18 +3387,13 @@ private void setInitialClosed() debitor.release(debitorIndex, initialId); debitorIndex = NO_DEBITOR_INDEX; } - - if (MqttState.closed(state)) - { - sessionStream = null; - } } public void setSubscriptions(List subscriptions) { this.subscriptions = subscriptions; } - public List getSubscriptions() + public List subscriptions() { return subscriptions; } @@ -4193,7 +4136,7 @@ private void onSubscribeWindow( iterator.remove(); } - sendNewSessionStateForUnsubscribe(traceId, authorization, 
ackedTopicFilters); + doSendSessionState(traceId, ackedTopicFilters); } } diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java index 35d13dfad2..489b9c62ae 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java @@ -88,6 +88,20 @@ public void shouldSubscribeSaveSubscriptionsInSession() throws Exception k3po.finish(); } + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/session.subscribe.multiple.isolated/client", + "${app}/session.subscribe.multiple.isolated/server"}) + @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") + public void shouldSubscribeMultipleSaveSubscriptionsInSession() throws Exception + { + k3po.finish(); + } + @Test @Configuration("server.yaml") @Specification({ From 7c6813dfac0ec09c69ef16deb14ce456306bf6a8 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 10 Aug 2023 12:08:52 -0700 Subject: [PATCH 016/115] Config builders (#330) --- .../serializer/OtlpMetricsSerializer.java | 10 +- .../config/OtlpExporterConfigTest.java | 18 +- .../http/config/HttpAccessControlConfig.java | 141 +-------- .../HttpAccessControlConfigBuilder.java | 81 ++++++ .../binding/http/config/HttpAllowConfig.java | 115 ++++++++ .../http/config/HttpAllowConfigBuilder.java | 84 ++++++ .../http/config/HttpAuthorizationConfig.java | 50 +--- .../HttpAuthorizationConfigBuilder.java | 59 ++++ .../http/config/HttpConditionConfig.java | 14 +- .../config/HttpConditionConfigBuilder.java | 53 ++++ 
.../http/config/HttpCredentialsConfig.java | 42 +++ .../config/HttpCredentialsConfigBuilder.java | 91 ++++++ .../binding/http/config/HttpExposeConfig.java | 48 ++++ .../http/config/HttpExposeConfigBuilder.java | 52 ++++ .../http/config/HttpOptionsConfig.java | 14 +- .../http/config/HttpOptionsConfigBuilder.java | 96 +++++++ .../http/config/HttpPatternConfig.java | 37 +++ .../http/config/HttpPatternConfigBuilder.java | 54 ++++ .../binding/http/config/HttpPolicyConfig.java | 22 ++ .../internal/config/HttpBindingConfig.java | 11 +- .../config/HttpConditionConfigAdapter.java | 19 +- .../config/HttpOptionsConfigAdapter.java | 272 +++++++----------- .../internal/stream/HttpServerFactory.java | 4 +- .../HttpConditionConfigAdapterTest.java | 4 +- .../config/HttpOptionsConfigAdapterTest.java | 56 ++-- .../tcp/config/TcpConditionConfig.java | 15 +- .../tcp/config/TcpConditionConfigBuilder.java | 63 ++++ .../binding/tcp/config/TcpOptionsConfig.java | 15 +- .../tcp/config/TcpOptionsConfigBuilder.java | 83 ++++++ .../tcp/internal/config/TcpBindingConfig.java | 2 +- .../config/TcpConditionConfigAdapter.java | 28 +- .../config/TcpOptionsConfigAdapter.java | 27 +- .../config/TcpConditionConfigAdapterTest.java | 7 +- .../config/TcpOptionsConfigAdapterTest.java | 11 +- .../tls/config/TlsConditionConfig.java | 15 +- .../tls/config/TlsConditionConfigBuilder.java | 55 ++++ .../{TlsMutual.java => TlsMutualConfig.java} | 2 +- .../binding/tls/config/TlsOptionsConfig.java | 18 +- .../tls/config/TlsOptionsConfigBuilder.java | 107 +++++++ .../tls/internal/config/TlsBindingConfig.java | 4 +- .../config/TlsConditionConfigAdapter.java | 20 +- .../config/TlsOptionsConfigAdapter.java | 76 +++-- .../config/TlsConditionConfigAdapterTest.java | 5 +- .../config/TlsOptionsConfigAdapterTest.java | 35 ++- .../engine/config/AttributeConfig.java | 8 +- .../engine/config/AttributeConfigBuilder.java | 51 ++++ .../runtime/engine/config/BindingConfig.java | 8 +- .../engine/config/BindingConfigBuilder.java | 
135 +++++++++ .../runtime/engine/config/ConfigBuilder.java | 21 ++ .../runtime/engine/config/ConfigReader.java | 2 +- .../runtime/engine/config/ConfigWriter.java | 8 +- .../runtime/engine/config/ExporterConfig.java | 8 +- .../engine/config/ExporterConfigBuilder.java | 66 +++++ .../runtime/engine/config/GuardConfig.java | 8 +- .../engine/config/GuardConfigBuilder.java | 66 +++++ .../runtime/engine/config/GuardedConfig.java | 8 +- .../engine/config/GuardedConfigBuilder.java | 65 +++++ .../runtime/engine/config/MetricConfig.java | 8 +- .../engine/config/MetricConfigBuilder.java | 52 ++++ .../engine/config/MetricRefConfig.java | 8 +- .../engine/config/MetricRefConfigBuilder.java | 43 +++ .../engine/config/NamespaceConfig.java | 14 +- .../engine/config/NamespaceConfigBuilder.java | 162 +++++++++++ .../NamespaceRefConfig.java} | 13 +- .../config/NamespaceRefConfigBuilder.java | 67 +++++ .../runtime/engine/config/RouteConfig.java | 34 +-- .../engine/config/RouteConfigBuilder.java | 114 ++++++++ .../engine/config/TelemetryConfig.java | 9 +- .../engine/config/TelemetryConfigBuilder.java | 97 +++++++ .../engine/config/TelemetryRefConfig.java | 9 +- .../config/TelemetryRefConfigBuilder.java | 55 ++++ .../runtime/engine/config/VaultConfig.java | 8 +- .../engine/config/VaultConfigBuilder.java | 68 +++++ .../internal/config/AttributeAdapter.java | 11 +- .../config/BindingConfigsAdapter.java | 100 ++++--- .../internal/config/ExporterAdapter.java | 10 +- .../engine/internal/config/GuardAdapter.java | 15 +- .../engine/internal/config/MetricAdapter.java | 14 +- .../internal/config/MetricRefAdapter.java | 10 +- .../internal/config/NamespaceAdapter.java | 93 +++--- .../internal/config/NamspaceRefAdapter.java | 40 ++- .../engine/internal/config/RouteAdapter.java | 61 ++-- .../internal/config/TelemetryAdapter.java | 45 +-- .../internal/config/TelemetryRefAdapter.java | 23 +- .../engine/internal/config/VaultAdapter.java | 16 +- .../engine/config/ConfigWriterTest.java | 39 ++- 
.../config/BindingConfigsAdapterTest.java | 104 +++++-- .../config/ConditionConfigAdapterTest.java | 42 ++- .../config/NamespaceConfigAdapterTest.java | 99 +++++-- ...ava => NamespaceRefConfigAdapterTest.java} | 21 +- .../config/OptionsConfigAdapterTest.java | 8 +- .../config/RouteConfigAdapterTest.java | 21 +- .../config/TelemetryConfigsAdapterTest.java | 46 ++- .../config/TestBindingOptionsConfig.java | 15 +- .../TestBindingOptionsConfigAdapter.java | 14 +- .../TestBindingOptionsConfigBuilder.java | 47 +++ .../config/TestExporterOptionsConfig.java | 15 +- .../TestExporterOptionsConfigAdapter.java | 15 +- .../TestExporterOptionsConfigBuilder.java | 47 +++ .../test/internal/guard/TestGuardConfig.java | 5 - .../test/internal/guard/TestGuardHandler.java | 4 +- .../guard/config/TestGuardOptionsConfig.java | 14 +- .../config/TestGuardOptionsConfigAdapter.java | 55 ++-- .../config/TestGuardOptionsConfigBuilder.java | 88 ++++++ .../vault/config/TestVaultOptionsConfig.java | 15 +- .../config/TestVaultOptionsConfigAdapter.java | 14 +- .../config/TestVaultOptionsConfigBuilder.java | 47 +++ .../EngineTest-duplicate-key.broken.json | 1 + .../PrometheusExporterHandlerTest.java | 6 +- .../guard/jwt/config/JwtKeyConfig.java | 9 +- .../guard/jwt/config/JwtKeyConfigBuilder.java | 109 +++++++ .../guard/jwt/config/JwtOptionsConfig.java | 28 +- .../jwt/config/JwtOptionsConfigBuilder.java | 97 +++++++ .../internal/config/JwtKeyConfigAdapter.java | 48 +++- .../config/JwtOptionsConfigAdapter.java | 52 ++-- .../jwt/internal/JwtGuardHandlerTest.java | 141 ++++++--- .../guard/jwt/internal/JwtGuardTest.java | 156 +++++++--- .../config/JwtOptionsConfigAdapterTest.java | 45 +-- .../jwt/internal/keys/JwtKeyConfigs.java | 46 ++- .../config/FileSystemOptionsConfig.java | 17 +- .../FileSystemOptionsConfigBuilder.java | 78 +++++ .../config/FileSystemStoreConfig.java | 11 +- .../config/FileSystemStoreConfigBuilder.java | 62 ++++ .../FileSystemOptionsConfigAdapter.java | 31 +- 
.../config/FileSystemStoreConfigAdapter.java | 18 +- .../internal/FileSystemVaultTest.java | 31 +- .../FileSystemOptionsConfigAdapterTest.java | 12 +- 127 files changed, 4403 insertions(+), 1058 deletions(-) create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfigBuilder.java create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAllowConfig.java create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAllowConfigBuilder.java create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfigBuilder.java create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfigBuilder.java create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpCredentialsConfig.java create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpCredentialsConfigBuilder.java create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpExposeConfig.java create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpExposeConfigBuilder.java create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfigBuilder.java create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPatternConfig.java create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPatternConfigBuilder.java create mode 100644 runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPolicyConfig.java create mode 100644 
runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfigBuilder.java create mode 100644 runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfigBuilder.java create mode 100644 runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfigBuilder.java rename runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/{TlsMutual.java => TlsMutualConfig.java} (96%) create mode 100644 runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfigBuilder.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/AttributeConfigBuilder.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigBuilder.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ExporterConfigBuilder.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardConfigBuilder.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardedConfigBuilder.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricConfigBuilder.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricRefConfigBuilder.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceConfigBuilder.java rename runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/{internal/config/NamespaceRef.java => config/NamespaceRefConfig.java} (73%) create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceRefConfigBuilder.java create mode 100644 
runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/RouteConfigBuilder.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryConfigBuilder.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryRefConfigBuilder.java create mode 100644 runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/VaultConfigBuilder.java rename runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/{ReferenceConfigAdapterTest.java => NamespaceRefConfigAdapterTest.java} (81%) create mode 100644 runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfigBuilder.java create mode 100644 runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigBuilder.java create mode 100644 runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfigBuilder.java create mode 100644 runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfigBuilder.java create mode 100644 runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfigBuilder.java create mode 100644 runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfigBuilder.java create mode 100644 runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfigBuilder.java create mode 100644 runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfigBuilder.java diff --git a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/serializer/OtlpMetricsSerializer.java b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/serializer/OtlpMetricsSerializer.java index 
301005aa6d..b5499c11b9 100644 --- a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/serializer/OtlpMetricsSerializer.java +++ b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/internal/serializer/OtlpMetricsSerializer.java @@ -141,8 +141,14 @@ private JsonArrayBuilder attributes( MetricRecord record) { return attributesToJson(List.of( - new AttributeConfig("namespace", record.namespace()), - new AttributeConfig("binding", record.binding()) + AttributeConfig.builder() + .name("namespace") + .value(record.namespace()) + .build(), + AttributeConfig.builder() + .name("binding") + .value(record.binding()) + .build() )); } diff --git a/incubator/exporter-otlp/src/test/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpExporterConfigTest.java b/incubator/exporter-otlp/src/test/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpExporterConfigTest.java index 7ecb23c681..3dc2808cd8 100644 --- a/incubator/exporter-otlp/src/test/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpExporterConfigTest.java +++ b/incubator/exporter-otlp/src/test/java/io/aklivity/zilla/runtime/exporter/otlp/internal/config/OtlpExporterConfigTest.java @@ -37,7 +37,11 @@ public void shouldCreateDefaultMetricsUrl() OtlpOverridesConfig overrides = new OtlpOverridesConfig(null); OtlpEndpointConfig endpoint = new OtlpEndpointConfig("http", URI.create("http://example.com"), overrides); OtlpOptionsConfig options = new OtlpOptionsConfig(30L, Set.of(METRICS), endpoint); - ExporterConfig exporter = new ExporterConfig("oltp0", "oltp", options); + ExporterConfig exporter = ExporterConfig.builder() + .name("oltp0") + .type("oltp") + .options(options) + .build(); OtlpExporterConfig oltpExporter = new OtlpExporterConfig(exporter); // WHEN @@ -54,7 +58,11 @@ public void shouldOverrideAbsoluteMetricsUrl() OtlpOverridesConfig overrides = new OtlpOverridesConfig(URI.create("http://overridden.com/metrics")); 
OtlpEndpointConfig endpoint = new OtlpEndpointConfig("http", URI.create("http://example.com"), overrides); OtlpOptionsConfig options = new OtlpOptionsConfig(30L, Set.of(METRICS), endpoint); - ExporterConfig exporter = new ExporterConfig("oltp0", "oltp", options); + ExporterConfig exporter = ExporterConfig.builder() + .name("oltp0") + .type("oltp") + .options(options) + .build(); OtlpExporterConfig oltpExporter = new OtlpExporterConfig(exporter); // WHEN @@ -71,7 +79,11 @@ public void shouldOverrideRelativeMetricsUrl() OtlpOverridesConfig overrides = new OtlpOverridesConfig(URI.create("/v42/metrix")); OtlpEndpointConfig endpoint = new OtlpEndpointConfig("http", URI.create("http://example.com"), overrides); OtlpOptionsConfig options = new OtlpOptionsConfig(30L, Set.of(METRICS), endpoint); - ExporterConfig exporter = new ExporterConfig("oltp0", "oltp", options); + ExporterConfig exporter = ExporterConfig.builder() + .name("oltp0") + .type("oltp") + .options(options) + .build(); OtlpExporterConfig oltpExporter = new OtlpExporterConfig(exporter); // WHEN diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfig.java index bea7f0940d..539be1037d 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfig.java @@ -15,10 +15,11 @@ */ package io.aklivity.zilla.runtime.binding.http.config; -import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.CROSS_ORIGIN; -import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.SAME_ORIGIN; +import static io.aklivity.zilla.runtime.binding.http.config.HttpPolicyConfig.CROSS_ORIGIN; +import static 
io.aklivity.zilla.runtime.binding.http.config.HttpPolicyConfig.SAME_ORIGIN; import static java.lang.ThreadLocal.withInitial; import static java.util.Collections.unmodifiableSet; +import static java.util.function.Function.identity; import static java.util.regex.Pattern.CASE_INSENSITIVE; import java.net.URI; @@ -42,7 +43,7 @@ public final class HttpAccessControlConfig private static final Pattern HEADERS_PATTERN = Pattern.compile("([^,\\s]+)(:?,\\s*([^,\\\\s]+))*", CASE_INSENSITIVE); private static final ThreadLocal ORIGIN_MATCHER = withInitial(() -> ORIGIN_PATTERN.matcher("")); - private static final ThreadLocal HEADERS_MATCHER = withInitial(() -> HEADERS_PATTERN.matcher("")); + static final ThreadLocal HEADERS_MATCHER = withInitial(() -> HEADERS_PATTERN.matcher("")); private static final ThreadLocal HEADER_BUILDER = ThreadLocal.withInitial(HttpHeaderFW.Builder::new); @@ -82,33 +83,23 @@ public final class HttpAccessControlConfig EXPOSED_RESPONSE_HEADERS = unmodifiableSet(headers); } - public enum HttpPolicyConfig - { - SAME_ORIGIN, - CROSS_ORIGIN - } - public final HttpPolicyConfig policy; public final HttpAllowConfig allow; public final Duration maxAge; public final HttpExposeConfig expose; - public HttpAccessControlConfig( - HttpPolicyConfig policy) + public static HttpAccessControlConfigBuilder builder() { - this.policy = policy; - this.allow = null; - this.maxAge = null; - this.expose = null; + return new HttpAccessControlConfigBuilder<>(identity()); } - public HttpAccessControlConfig( + HttpAccessControlConfig( HttpPolicyConfig policy, HttpAllowConfig allow, Duration maxAge, HttpExposeConfig expose) { - this.policy = CROSS_ORIGIN; + this.policy = policy; this.allow = allow; this.maxAge = maxAge; this.expose = expose; @@ -296,119 +287,7 @@ private boolean matchesAuthority( return matches; } - public static final class HttpAllowConfig - { - public final Set origins; - public final Set methods; - public final Set headers; - public final boolean credentials; - - 
private final Set implicitOrigins; - - public HttpAllowConfig( - Set origins, - Set methods, - Set headers, - boolean credentials) - { - this.origins = origins; - this.implicitOrigins = origins != null ? asImplicitOrigins(origins) : null; - this.methods = methods; - this.headers = headers != null ? asCaseless(headers) : null; - this.credentials = credentials; - } - - private boolean origin( - String origin) - { - return origins == null || - origins.contains(origin) || - implicitOrigins.contains(origin); - } - - private boolean method( - String method) - { - return methods == null || - methods.contains(method); - } - - private boolean headers( - String headers) - { - return headers == null || - headersMatch(headers); - } - - private boolean headersMatch( - String headers) - { - int match = 0; - - Matcher matchHeaders = HEADERS_MATCHER.get().reset(headers); - while (matchHeaders.find()) - { - if (header(matchHeaders.group(1))) - { - match++; - } - } - - return match > 0; - } - - private boolean header( - String header) - { - return headers == null || - headers.contains(header); - } - - private boolean originExplicit() - { - return credentials || origins != null; - } - - private boolean methodsExplicit() - { - return credentials || methods != null; - } - - private boolean headersExplicit() - { - return credentials || headers != null; - } - - public boolean credentialsExplicit() - { - return credentials; - } - } - - public static final class HttpExposeConfig - { - public final Set headers; - - public HttpExposeConfig( - Set headers) - { - this.headers = headers; - } - - private boolean header( - String header) - { - return headers == null || - headers.contains(header); - } - - private boolean headersExplicit() - { - return headers != null; - } - } - - private static Set asCaseless( + static Set asCaseless( Set cased) { final Set caseless = new TreeSet(String::compareToIgnoreCase); @@ -416,7 +295,7 @@ private static Set asCaseless( return caseless; } - private static 
Set asImplicitOrigins( + static Set asImplicitOrigins( Set origins) { Set implicit = new LinkedHashSet<>(); diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfigBuilder.java new file mode 100644 index 0000000000..153e964d2b --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfigBuilder.java @@ -0,0 +1,81 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.http.config; + +import java.time.Duration; +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public final class HttpAccessControlConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private HttpPolicyConfig policy; + private HttpAllowConfig allow; + private Duration maxAge; + private HttpExposeConfig expose; + + HttpAccessControlConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public HttpAccessControlConfigBuilder policy( + HttpPolicyConfig policy) + { + this.policy = policy; + return this; + } + + public HttpAllowConfigBuilder> allow() + { + return new HttpAllowConfigBuilder<>(this::allow); + } + + public HttpAccessControlConfigBuilder maxAge( + Duration maxAge) + { + this.maxAge = maxAge; + return this; + } + + public HttpExposeConfigBuilder> expose() + { + return new HttpExposeConfigBuilder<>(this::expose); + } + + @Override + public T build() + { + return mapper.apply(new HttpAccessControlConfig(policy, allow, maxAge, expose)); + } + + private HttpAccessControlConfigBuilder allow( + HttpAllowConfig allow) + { + this.allow = allow; + return this; + } + + private HttpAccessControlConfigBuilder expose( + HttpExposeConfig expose) + { + this.expose = expose; + return this; + } +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAllowConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAllowConfig.java new file mode 100644 index 0000000000..119cee5fef --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAllowConfig.java @@ -0,0 +1,115 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.http.config; + +import static java.util.function.Function.identity; + +import java.util.Set; +import java.util.regex.Matcher; + +public final class HttpAllowConfig +{ + public final Set origins; + public final Set methods; + public final Set headers; + public final boolean credentials; + + private final Set implicitOrigins; + + public static HttpAllowConfigBuilder builder() + { + return new HttpAllowConfigBuilder<>(identity()); + } + + HttpAllowConfig( + Set origins, + Set methods, + Set headers, + boolean credentials) + { + this.origins = origins; + this.implicitOrigins = origins != null ? HttpAccessControlConfig.asImplicitOrigins(origins) : null; + this.methods = methods; + this.headers = headers != null ? 
HttpAccessControlConfig.asCaseless(headers) : null; + this.credentials = credentials; + } + + boolean origin( + String origin) + { + return origins == null || + origins.contains(origin) || + implicitOrigins.contains(origin); + } + + boolean method( + String method) + { + return methods == null || + methods.contains(method); + } + + boolean headers( + String headers) + { + return headers == null || + headersMatch(headers); + } + + private boolean headersMatch( + String headers) + { + int match = 0; + + Matcher matchHeaders = HttpAccessControlConfig.HEADERS_MATCHER.get().reset(headers); + while (matchHeaders.find()) + { + if (header(matchHeaders.group(1))) + { + match++; + } + } + + return match > 0; + } + + private boolean header( + String header) + { + return headers == null || + headers.contains(header); + } + + boolean originExplicit() + { + return credentials || origins != null; + } + + boolean methodsExplicit() + { + return credentials || methods != null; + } + + boolean headersExplicit() + { + return credentials || headers != null; + } + + boolean credentialsExplicit() + { + return credentials; + } +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAllowConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAllowConfigBuilder.java new file mode 100644 index 0000000000..126f246b31 --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAllowConfigBuilder.java @@ -0,0 +1,84 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.http.config; + +import java.util.LinkedHashSet; +import java.util.Set; +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public final class HttpAllowConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private Set origins; + private Set methods; + private Set headers; + private boolean credentials; + + HttpAllowConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public HttpAllowConfigBuilder origin( + String origin) + { + if (origins == null) + { + origins = new LinkedHashSet<>(); + } + origins.add(origin); + return this; + } + + public HttpAllowConfigBuilder method( + String method) + { + if (methods == null) + { + methods = new LinkedHashSet<>(); + } + methods.add(method); + return this; + } + + public HttpAllowConfigBuilder header( + String header) + { + if (headers == null) + { + headers = new LinkedHashSet<>(); + } + headers.add(header); + return this; + } + + public HttpAllowConfigBuilder credentials( + boolean credentials) + { + this.credentials = credentials; + return this; + } + + @Override + public T build() + { + return mapper.apply(new HttpAllowConfig(origins, methods, headers, credentials)); + } +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfig.java index f734483f77..7bb3baefd3 100644 --- 
a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfig.java @@ -15,59 +15,23 @@ */ package io.aklivity.zilla.runtime.binding.http.config; -import java.util.List; -import java.util.function.Function; +import static java.util.function.Function.identity; public final class HttpAuthorizationConfig { - public static final Function, String> DEFAULT_CREDENTIALS = f -> null; - public final String name; public final HttpCredentialsConfig credentials; - public HttpAuthorizationConfig( + public static HttpAuthorizationConfigBuilder builder() + { + return new HttpAuthorizationConfigBuilder<>(identity()); + } + + HttpAuthorizationConfig( String name, HttpCredentialsConfig credentials) { this.name = name; this.credentials = credentials; } - - public static final class HttpCredentialsConfig - { - public final List headers; - public final List parameters; - public final List cookies; - - - public HttpCredentialsConfig( - List headers) - { - this(headers, null, null); - } - - public HttpCredentialsConfig( - List headers, - List parameters, - List cookies) - { - this.headers = headers; - this.parameters = parameters; - this.cookies = cookies; - } - } - - public static final class HttpPatternConfig - { - public final String name; - public final String pattern; - - public HttpPatternConfig( - String name, - String pattern) - { - this.name = name; - this.pattern = pattern; - } - } } diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfigBuilder.java new file mode 100644 index 0000000000..d27ff19cb5 --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfigBuilder.java @@ -0,0 
+1,59 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.http.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public final class HttpAuthorizationConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String name; + private HttpCredentialsConfig credentials; + + HttpAuthorizationConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public HttpAuthorizationConfigBuilder name( + String name) + { + this.name = name; + return this; + } + + public HttpCredentialsConfigBuilder> credentials() + { + return new HttpCredentialsConfigBuilder<>(this::credentials); + } + + @Override + public T build() + { + return mapper.apply(new HttpAuthorizationConfig(name, credentials)); + } + + private HttpAuthorizationConfigBuilder credentials( + HttpCredentialsConfig credentials) + { + this.credentials = credentials; + return this; + } +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfig.java index f3a3d42a96..aa19a7f7f3 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfig.java +++ 
b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfig.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.binding.http.config; import java.util.Map; +import java.util.function.Function; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; @@ -23,7 +24,18 @@ public final class HttpConditionConfig extends ConditionConfig { public final Map headers; - public HttpConditionConfig( + public static HttpConditionConfigBuilder builder() + { + return new HttpConditionConfigBuilder<>(HttpConditionConfig.class::cast); + } + + public static HttpConditionConfigBuilder builder( + Function mapper) + { + return new HttpConditionConfigBuilder<>(mapper); + } + + HttpConditionConfig( Map headers) { this.headers = headers; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfigBuilder.java new file mode 100644 index 0000000000..45745fbe98 --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfigBuilder.java @@ -0,0 +1,53 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.http.config; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConditionConfig; +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public final class HttpConditionConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private Map headers; + + HttpConditionConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public HttpConditionConfigBuilder header( + String name, + String value) + { + if (headers == null) + { + headers = new LinkedHashMap<>(); + } + headers.put(name, value); + return this; + } + + public T build() + { + return mapper.apply(new HttpConditionConfig(headers)); + } +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpCredentialsConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpCredentialsConfig.java new file mode 100644 index 0000000000..837a8483c3 --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpCredentialsConfig.java @@ -0,0 +1,42 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.http.config; + +import static java.util.function.Function.identity; + +import java.util.List; + +public final class HttpCredentialsConfig +{ + public final List headers; + public final List parameters; + public final List cookies; + + public static HttpCredentialsConfigBuilder builder() + { + return new HttpCredentialsConfigBuilder<>(identity()); + } + + HttpCredentialsConfig( + List headers, + List parameters, + List cookies) + { + this.headers = headers; + this.parameters = parameters; + this.cookies = cookies; + } +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpCredentialsConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpCredentialsConfigBuilder.java new file mode 100644 index 0000000000..28b0c3f0e6 --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpCredentialsConfigBuilder.java @@ -0,0 +1,91 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.http.config; + +import java.util.LinkedList; +import java.util.List; +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public final class HttpCredentialsConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private List headers; + private List parameters; + private List cookies; + + HttpCredentialsConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public HttpPatternConfigBuilder> header() + { + return new HttpPatternConfigBuilder<>(this::header); + } + + public HttpPatternConfigBuilder> parameter() + { + return new HttpPatternConfigBuilder<>(this::parameter); + } + + public HttpPatternConfigBuilder> cookie() + { + return new HttpPatternConfigBuilder<>(this::cookie); + } + + @Override + public T build() + { + return mapper.apply(new HttpCredentialsConfig(headers, parameters, cookies)); + } + + private HttpCredentialsConfigBuilder header( + HttpPatternConfig header) + { + if (headers == null) + { + headers = new LinkedList<>(); + } + headers.add(header); + return this; + } + + private HttpCredentialsConfigBuilder parameter( + HttpPatternConfig parameter) + { + if (parameters == null) + { + parameters = new LinkedList<>(); + } + parameters.add(parameter); + return this; + } + + private HttpCredentialsConfigBuilder cookie( + HttpPatternConfig cookie) + { + if (cookies == null) + { + cookies = new LinkedList<>(); + } + cookies.add(cookie); + return this; + } +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpExposeConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpExposeConfig.java new file mode 100644 index 0000000000..b6d2263582 --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpExposeConfig.java @@ -0,0 +1,48 @@ +/* + * Copyright 2021-2023 Aklivity Inc. 
+ * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.http.config; + +import static java.util.function.Function.identity; + +import java.util.Set; + +public final class HttpExposeConfig +{ + public final Set headers; + + public HttpExposeConfigBuilder builder() + { + return new HttpExposeConfigBuilder<>(identity()); + } + + HttpExposeConfig( + Set headers) + { + this.headers = headers; + } + + boolean header( + String header) + { + return headers == null || + headers.contains(header); + } + + boolean headersExplicit() + { + return headers != null; + } +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpExposeConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpExposeConfigBuilder.java new file mode 100644 index 0000000000..2734a3ae1a --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpExposeConfigBuilder.java @@ -0,0 +1,52 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.http.config; + +import java.util.LinkedHashSet; +import java.util.Set; +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public final class HttpExposeConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private Set headers; + + public HttpExposeConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public HttpExposeConfigBuilder header( + String header) + { + if (headers == null) + { + headers = new LinkedHashSet<>(); + } + headers.add(header); + return this; + } + + @Override + public T build() + { + return mapper.apply(new HttpExposeConfig(headers)); + } +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfig.java index fdcef5be3f..bc0933b546 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfig.java @@ -17,6 +17,7 @@ import java.util.Map; import java.util.SortedSet; +import java.util.function.Function; import io.aklivity.zilla.runtime.binding.http.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.http.internal.types.String8FW; @@ -29,7 +30,18 @@ public final class HttpOptionsConfig extends OptionsConfig public final HttpAccessControlConfig access; public final 
HttpAuthorizationConfig authorization; - public HttpOptionsConfig( + public static HttpOptionsConfigBuilder builder() + { + return new HttpOptionsConfigBuilder<>(HttpOptionsConfig.class::cast); + } + + public static HttpOptionsConfigBuilder builder( + Function mapper) + { + return new HttpOptionsConfigBuilder<>(mapper); + } + + HttpOptionsConfig( SortedSet versions, Map overrides, HttpAccessControlConfig access, diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfigBuilder.java new file mode 100644 index 0000000000..23dec3727a --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfigBuilder.java @@ -0,0 +1,96 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.http.config; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.function.Function; + +import io.aklivity.zilla.runtime.binding.http.internal.types.String16FW; +import io.aklivity.zilla.runtime.binding.http.internal.types.String8FW; +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; + +public final class HttpOptionsConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private SortedSet versions; + private Map overrides; + private HttpAccessControlConfig access; + private HttpAuthorizationConfig authorization; + + HttpOptionsConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public HttpOptionsConfigBuilder version( + HttpVersion version) + { + if (versions == null) + { + versions = new TreeSet<>(); + } + versions.add(version); + return this; + } + + public HttpOptionsConfigBuilder override( + String8FW name, + String16FW value) + { + if (overrides == null) + { + overrides = new LinkedHashMap<>(); + } + overrides.put(name, value); + return this; + } + + public HttpAccessControlConfigBuilder> access() + { + return new HttpAccessControlConfigBuilder<>(this::access); + } + + public HttpAuthorizationConfigBuilder> authorization() + { + return new HttpAuthorizationConfigBuilder<>(this::authorization); + } + + @Override + public T build() + { + return mapper.apply(new HttpOptionsConfig(versions, overrides, access, authorization)); + } + + private HttpOptionsConfigBuilder authorization( + HttpAuthorizationConfig authorization) + { + this.authorization = authorization; + return this; + } + + private HttpOptionsConfigBuilder access( + HttpAccessControlConfig access) + { + this.access = access; + return this; + } +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPatternConfig.java 
b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPatternConfig.java new file mode 100644 index 0000000000..97d22dc90b --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPatternConfig.java @@ -0,0 +1,37 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.http.config; + +import static java.util.function.Function.identity; + +public final class HttpPatternConfig +{ + public final String name; + public final String pattern; + + public static HttpPatternConfigBuilder builder() + { + return new HttpPatternConfigBuilder<>(identity()); + } + + HttpPatternConfig( + String name, + String pattern) + { + this.name = name; + this.pattern = pattern; + } +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPatternConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPatternConfigBuilder.java new file mode 100644 index 0000000000..2cc4485696 --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPatternConfigBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2021-2023 Aklivity Inc. 
+ * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.http.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public final class HttpPatternConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String name; + private String pattern; + + HttpPatternConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public HttpPatternConfigBuilder name( + String name) + { + this.name = name; + return this; + } + + public HttpPatternConfigBuilder pattern( + String pattern) + { + this.pattern = pattern; + return this; + } + + @Override + public T build() + { + return mapper.apply(new HttpPatternConfig(name, pattern)); + } +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPolicyConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPolicyConfig.java new file mode 100644 index 0000000000..74a0aa1c93 --- /dev/null +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPolicyConfig.java @@ -0,0 +1,22 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.http.config; + +public enum HttpPolicyConfig +{ + SAME_ORIGIN, + CROSS_ORIGIN +} diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpBindingConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpBindingConfig.java index b58dd778a7..9bbe39aa86 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpBindingConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpBindingConfig.java @@ -15,8 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.http.internal.config; -import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.SAME_ORIGIN; -import static io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.DEFAULT_CREDENTIALS; +import static io.aklivity.zilla.runtime.binding.http.config.HttpPolicyConfig.SAME_ORIGIN; import static java.util.EnumSet.allOf; import static java.util.stream.Collectors.toList; @@ -29,17 +28,19 @@ import java.util.regex.Pattern; import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig; -import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.HttpCredentialsConfig; -import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.HttpPatternConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpCredentialsConfig; import 
io.aklivity.zilla.runtime.binding.http.config.HttpOptionsConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpPatternConfig; import io.aklivity.zilla.runtime.binding.http.config.HttpVersion; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; public final class HttpBindingConfig { + private static final Function, String> DEFAULT_CREDENTIALS = f -> null; private static final SortedSet DEFAULT_VERSIONS = new TreeSet<>(allOf(HttpVersion.class)); - private static final HttpAccessControlConfig DEFAULT_ACCESS_CONTROL = new HttpAccessControlConfig(SAME_ORIGIN); + private static final HttpAccessControlConfig DEFAULT_ACCESS_CONTROL = + HttpAccessControlConfig.builder().policy(SAME_ORIGIN).build(); public final long id; public final String name; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapter.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapter.java index f13363c6b1..b259a01dd7 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapter.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapter.java @@ -15,9 +15,6 @@ */ package io.aklivity.zilla.runtime.binding.http.internal.config; -import java.util.LinkedHashMap; -import java.util.Map; - import jakarta.json.Json; import jakarta.json.JsonObject; import jakarta.json.JsonObjectBuilder; @@ -25,6 +22,7 @@ import jakarta.json.bind.adapter.JsonbAdapter; import io.aklivity.zilla.runtime.binding.http.config.HttpConditionConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpConditionConfigBuilder; import io.aklivity.zilla.runtime.binding.http.internal.HttpBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import 
io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; @@ -63,19 +61,14 @@ public JsonObject adaptToJson( public ConditionConfig adaptFromJson( JsonObject object) { - JsonObject headers = object.containsKey(HEADERS_NAME) - ? object.getJsonObject(HEADERS_NAME) - : null; - - Map newHeaders = null; + HttpConditionConfigBuilder httpCondition = HttpConditionConfig.builder(); - if (headers != null) + if (object.containsKey(HEADERS_NAME)) { - Map newHeaders0 = new LinkedHashMap<>(); - headers.forEach((k, v) -> newHeaders0.put(k, JsonString.class.cast(v).getString())); - newHeaders = newHeaders0; + object.getJsonObject(HEADERS_NAME) + .forEach((k, v) -> httpCondition.header(k, JsonString.class.cast(v).getString())); } - return new HttpConditionConfig(newHeaders); + return httpCondition.build(); } } diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapter.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapter.java index f903680c88..3803b9c771 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapter.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapter.java @@ -15,35 +15,30 @@ */ package io.aklivity.zilla.runtime.binding.http.internal.config; -import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.CROSS_ORIGIN; -import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.SAME_ORIGIN; +import static io.aklivity.zilla.runtime.binding.http.config.HttpPolicyConfig.CROSS_ORIGIN; +import static io.aklivity.zilla.runtime.binding.http.config.HttpPolicyConfig.SAME_ORIGIN; import java.time.Duration; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import 
java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeSet; import jakarta.json.Json; -import jakarta.json.JsonArray; import jakarta.json.JsonArrayBuilder; -import jakarta.json.JsonNumber; import jakarta.json.JsonObject; import jakarta.json.JsonObjectBuilder; import jakarta.json.JsonString; import jakarta.json.bind.adapter.JsonbAdapter; import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig; -import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpAllowConfig; -import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpExposeConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfigBuilder; +import io.aklivity.zilla.runtime.binding.http.config.HttpAllowConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAllowConfigBuilder; import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig; -import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.HttpCredentialsConfig; -import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.HttpPatternConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfigBuilder; +import io.aklivity.zilla.runtime.binding.http.config.HttpCredentialsConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpCredentialsConfigBuilder; +import io.aklivity.zilla.runtime.binding.http.config.HttpExposeConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpExposeConfigBuilder; import io.aklivity.zilla.runtime.binding.http.config.HttpOptionsConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpOptionsConfigBuilder; import io.aklivity.zilla.runtime.binding.http.config.HttpVersion; import io.aklivity.zilla.runtime.binding.http.internal.HttpBinding; import io.aklivity.zilla.runtime.binding.http.internal.types.String16FW; @@ -238,197 +233,144 @@ public JsonObject adaptToJson( public 
OptionsConfig adaptFromJson( JsonObject object) { - JsonArray versions = object.containsKey(VERSIONS_NAME) - ? object.getJsonArray(VERSIONS_NAME) - : null; + HttpOptionsConfigBuilder httpOptions = HttpOptionsConfig.builder(); - SortedSet newVersions = null; - - if (versions != null) + if (object.containsKey(VERSIONS_NAME)) { - SortedSet newVersions0 = new TreeSet(); - versions.forEach(v -> - newVersions0.add(HttpVersion.of(JsonString.class.cast(v).getString()))); - newVersions = newVersions0; + object.getJsonArray(VERSIONS_NAME) + .forEach(v -> httpOptions.version(HttpVersion.of(JsonString.class.cast(v).getString()))); } - HttpAuthorizationConfig newAuthorization = null; - - JsonObject authorizations = object.containsKey(AUTHORIZATION_NAME) - ? object.getJsonObject(AUTHORIZATION_NAME) - : null; - - if (authorizations != null) + if (object.containsKey(AUTHORIZATION_NAME)) { + HttpAuthorizationConfigBuilder httpAuthorization = httpOptions.authorization(); + + JsonObject authorizations = object.getJsonObject(AUTHORIZATION_NAME); for (String name : authorizations.keySet()) { JsonObject authorization = authorizations.getJsonObject(name); - - HttpCredentialsConfig newCredentials = null; - JsonObject credentials = authorization.getJsonObject(AUTHORIZATION_CREDENTIALS_NAME); if (credentials != null) { - List newHeaders = - adaptPatternFromJson(credentials, AUTHORIZATION_CREDENTIALS_HEADERS_NAME); + HttpCredentialsConfigBuilder httpCredentials = httpAuthorization + .name(name) + .credentials(); - List newParameters = - adaptPatternFromJson(credentials, AUTHORIZATION_CREDENTIALS_QUERY_NAME); + if (credentials.containsKey(AUTHORIZATION_CREDENTIALS_HEADERS_NAME)) + { + credentials.getJsonObject(AUTHORIZATION_CREDENTIALS_HEADERS_NAME) + .forEach((n, v) -> httpCredentials.header() + .name(n) + .pattern(JsonString.class.cast(v).getString()) + .build()); + } - List newCookies = - adaptPatternFromJson(credentials, AUTHORIZATION_CREDENTIALS_COOKIES_NAME); + if 
(credentials.containsKey(AUTHORIZATION_CREDENTIALS_QUERY_NAME)) + { + credentials.getJsonObject(AUTHORIZATION_CREDENTIALS_QUERY_NAME) + .forEach((n, v) -> httpCredentials.parameter() + .name(n) + .pattern(JsonString.class.cast(v).getString()) + .build()); + } - newCredentials = new HttpCredentialsConfig(newHeaders, newParameters, newCookies); - } + if (credentials.containsKey(AUTHORIZATION_CREDENTIALS_COOKIES_NAME)) + { + credentials.getJsonObject(AUTHORIZATION_CREDENTIALS_COOKIES_NAME) + .forEach((n, v) -> httpCredentials.cookie() + .name(n) + .pattern(JsonString.class.cast(v).getString()) + .build()); + } - newAuthorization = new HttpAuthorizationConfig(name, newCredentials); + httpCredentials.build(); + } } - } - - HttpAccessControlConfig newAccess = null; - JsonObject access = object.containsKey(ACCESS_CONTROL_NAME) - ? object.getJsonObject(ACCESS_CONTROL_NAME) - : null; + httpAuthorization.build(); + } - if (access != null) + if (object.containsKey(ACCESS_CONTROL_NAME)) { - String policy = access.containsKey(POLICY_NAME) - ? access.getString(POLICY_NAME) - : null; + JsonObject access = object.getJsonObject(ACCESS_CONTROL_NAME); - switch (policy) + if (access.containsKey(POLICY_NAME)) { - case POLICY_VALUE_SAME_ORIGIN: - newAccess = new HttpAccessControlConfig(SAME_ORIGIN); - break; - case POLICY_VALUE_CROSS_ORIGIN: - JsonObject allow = access.containsKey(ALLOW_NAME) - ? access.getJsonObject(ALLOW_NAME) - : null; + HttpAccessControlConfigBuilder httpAccess = httpOptions.access(); - HttpAllowConfig newAllow = null; - if (allow != null) + String policy = access.getString(POLICY_NAME); + switch (policy) { - JsonArray origins = allow.containsKey(ALLOW_ORIGINS_NAME) - ? 
allow.getJsonArray(ALLOW_ORIGINS_NAME) - : null; - - Set newOrigins = null; - if (origins != null) - { - Set newOrigins0 = new LinkedHashSet<>(); - origins.forEach(v -> newOrigins0.add(JsonString.class.cast(v).getString())); - newOrigins = newOrigins0; - } - - JsonArray methods = allow.containsKey(ALLOW_METHODS_NAME) - ? allow.getJsonArray(ALLOW_METHODS_NAME) - : null; + case POLICY_VALUE_SAME_ORIGIN: + httpAccess.policy(SAME_ORIGIN); + break; + case POLICY_VALUE_CROSS_ORIGIN: + httpAccess.policy(CROSS_ORIGIN); - Set newMethods = null; - if (methods != null) + if (access.containsKey(ALLOW_NAME)) { - Set newMethods0 = new LinkedHashSet<>(); - methods.forEach(v -> newMethods0.add(JsonString.class.cast(v).getString())); - newMethods = newMethods0; + HttpAllowConfigBuilder httpAllow = httpAccess.allow(); + JsonObject allow = access.getJsonObject(ALLOW_NAME); + + if (allow.containsKey(ALLOW_ORIGINS_NAME)) + { + allow.getJsonArray(ALLOW_ORIGINS_NAME) + .forEach(v -> httpAllow.origin(JsonString.class.cast(v).getString())); + } + + if (allow.containsKey(ALLOW_METHODS_NAME)) + { + allow.getJsonArray(ALLOW_METHODS_NAME) + .forEach(v -> httpAllow.method(JsonString.class.cast(v).getString())); + } + + if (allow.containsKey(ALLOW_HEADERS_NAME)) + { + allow.getJsonArray(ALLOW_HEADERS_NAME) + .forEach(v -> httpAllow.header(JsonString.class.cast(v).getString())); + } + + if (allow.containsKey(ALLOW_CREDENTIALS_NAME)) + { + httpAllow.credentials(allow.getBoolean(ALLOW_CREDENTIALS_NAME)); + } + + httpAllow.build(); } - JsonArray headers = allow.containsKey(ALLOW_HEADERS_NAME) - ? 
allow.getJsonArray(ALLOW_HEADERS_NAME) - : null; - - Set newHeaders = null; - if (headers != null) + if (access.containsKey(MAX_AGE_NAME)) { - Set newHeaders0 = new LinkedHashSet<>(); - headers.forEach(v -> newHeaders0.add(JsonString.class.cast(v).getString())); - newHeaders = newHeaders0; + httpAccess.maxAge(Duration.ofSeconds(access.getJsonNumber(MAX_AGE_NAME).longValue())); } - boolean newCredentials = false; - if (allow.containsKey(ALLOW_CREDENTIALS_NAME)) + if (access.containsKey(EXPOSE_NAME)) { - newCredentials = allow.getBoolean(ALLOW_CREDENTIALS_NAME); - } + HttpExposeConfigBuilder httpExpose = httpAccess.expose(); + JsonObject expose = access.getJsonObject(EXPOSE_NAME); - newAllow = new HttpAllowConfig(newOrigins, newMethods, newHeaders, newCredentials); - } + if (expose.containsKey(ALLOW_HEADERS_NAME)) + { + expose.getJsonArray(ALLOW_HEADERS_NAME) + .forEach(v -> httpExpose.header(JsonString.class.cast(v).getString())); + } - Duration newMaxAge = null; - - JsonNumber maxAge = access.containsKey(MAX_AGE_NAME) - ? access.getJsonNumber(MAX_AGE_NAME) - : null; - - if (maxAge != null) - { - newMaxAge = Duration.ofSeconds(maxAge.longValue()); - } - - HttpExposeConfig newExpose = null; - - JsonObject expose = access.containsKey(EXPOSE_NAME) - ? access.getJsonObject(EXPOSE_NAME) - : null; - - if (expose != null) - { - JsonArray headers = expose.containsKey(ALLOW_HEADERS_NAME) - ? expose.getJsonArray(ALLOW_HEADERS_NAME) - : null; - - Set newHeaders = null; - if (headers != null) - { - Set newHeaders0 = new LinkedHashSet<>(); - headers.forEach(v -> newHeaders0.add(JsonString.class.cast(v).getString())); - newHeaders = newHeaders0; + httpExpose.build(); } - newExpose = new HttpExposeConfig(newHeaders); + httpAccess.build(); + break; } - - newAccess = new HttpAccessControlConfig(CROSS_ORIGIN, newAllow, newMaxAge, newExpose); - break; } } - JsonObject overrides = object.containsKey(OVERRIDES_NAME) - ? 
object.getJsonObject(OVERRIDES_NAME) - : null; - - Map newOverrides = null; - - if (overrides != null) + if (object.containsKey(OVERRIDES_NAME)) { - Map newOverrides0 = new LinkedHashMap<>(); - overrides.forEach((k, v) -> - newOverrides0.put(new String8FW(k), new String16FW(JsonString.class.cast(v).getString()))); - newOverrides = newOverrides0; + object.getJsonObject(OVERRIDES_NAME) + .forEach((k, v) -> + httpOptions.override(new String8FW(k), new String16FW(JsonString.class.cast(v).getString()))); } - return new HttpOptionsConfig(newVersions, newOverrides, newAccess, newAuthorization); - } - - private List adaptPatternFromJson( - JsonObject object, - String property) - { - List newPatterns = null; - if (object.containsKey(property)) - { - newPatterns = new ArrayList<>(); - - JsonObject patterns = object.getJsonObject(property); - for (String name : patterns.keySet()) - { - String pattern = patterns.getString(name); - - newPatterns.add(new HttpPatternConfig(name, pattern)); - } - } - return newPatterns; + return httpOptions.build(); } } diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java index 600494953d..78f0974c7c 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java @@ -15,7 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.http.internal.stream; -import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.CROSS_ORIGIN; +import static io.aklivity.zilla.runtime.binding.http.config.HttpPolicyConfig.CROSS_ORIGIN; import static io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackContext.CONNECTION; import static 
io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackContext.KEEP_ALIVE; import static io.aklivity.zilla.runtime.binding.http.internal.hpack.HpackContext.PROXY_CONNECTION; @@ -80,7 +80,7 @@ import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig; -import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpPolicyConfig; import io.aklivity.zilla.runtime.binding.http.config.HttpVersion; import io.aklivity.zilla.runtime.binding.http.internal.HttpBinding; import io.aklivity.zilla.runtime.binding.http.internal.HttpConfiguration; diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapterTest.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapterTest.java index 5f361252a8..fc30e64ad1 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapterTest.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapterTest.java @@ -62,7 +62,9 @@ public void shouldReadCondition() @Test public void shouldWriteCondition() { - HttpConditionConfig condition = new HttpConditionConfig(singletonMap(":authority", "example.net:443")); + HttpConditionConfig condition = HttpConditionConfig.builder() + .header(":authority", "example.net:443") + .build(); String text = jsonb.toJson(condition); diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java index a3a6ed74c7..c96a0e45da 100644 --- 
a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java @@ -15,9 +15,8 @@ */ package io.aklivity.zilla.runtime.binding.http.internal.config; -import static io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpPolicyConfig.CROSS_ORIGIN; +import static io.aklivity.zilla.runtime.binding.http.config.HttpPolicyConfig.CROSS_ORIGIN; import static java.util.Collections.singleton; -import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; @@ -28,7 +27,6 @@ import java.time.Duration; import java.util.EnumSet; -import java.util.TreeSet; import jakarta.json.bind.Jsonb; import jakarta.json.bind.JsonbBuilder; @@ -37,12 +35,6 @@ import org.junit.Before; import org.junit.Test; -import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig; -import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpAllowConfig; -import io.aklivity.zilla.runtime.binding.http.config.HttpAccessControlConfig.HttpExposeConfig; -import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig; -import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.HttpCredentialsConfig; -import io.aklivity.zilla.runtime.binding.http.config.HttpAuthorizationConfig.HttpPatternConfig; import io.aklivity.zilla.runtime.binding.http.config.HttpOptionsConfig; import io.aklivity.zilla.runtime.binding.http.config.HttpVersion; import io.aklivity.zilla.runtime.binding.http.internal.types.String16FW; @@ -132,25 +124,33 @@ public void shouldReadOptions() @Test public void shouldWriteOptions() { - HttpOptionsConfig options = new HttpOptionsConfig( - new TreeSet<>(EnumSet.allOf(HttpVersion.class)), - 
singletonMap(new String8FW(":authority"), new String16FW("example.com:443")), - new HttpAccessControlConfig( - CROSS_ORIGIN, - new HttpAllowConfig( - singleton("https://example.com:9090"), - singleton("DELETE"), - singleton("x-api-key"), - true), - Duration.ofSeconds(10), - new HttpExposeConfig( - singleton("x-custom-header"))), - new HttpAuthorizationConfig( - "test0", - new HttpCredentialsConfig( - singletonList(new HttpPatternConfig( - "authorization", - "Bearer {credentials}"))))); + HttpOptionsConfig options = HttpOptionsConfig.builder() + .version(HttpVersion.HTTP_1_1) + .version(HttpVersion.HTTP_2) + .override(new String8FW(":authority"), new String16FW("example.com:443")) + .access() + .policy(CROSS_ORIGIN) + .allow() + .origin("https://example.com:9090") + .method("DELETE") + .header("x-api-key") + .credentials(true) + .build() + .maxAge(Duration.ofSeconds(10)) + .expose() + .header("x-custom-header") + .build() + .build() + .authorization() + .name("test0") + .credentials() + .header() + .name("authorization") + .pattern("Bearer {credentials}") + .build() + .build() + .build() + .build(); String text = jsonb.toJson(options); diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfig.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfig.java index 8b765aeb11..6e9b53d85f 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfig.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfig.java @@ -15,6 +15,8 @@ */ package io.aklivity.zilla.runtime.binding.tcp.config; +import java.util.function.Function; + import io.aklivity.zilla.runtime.engine.config.ConditionConfig; public final class TcpConditionConfig extends ConditionConfig @@ -23,7 +25,18 @@ public final class TcpConditionConfig extends ConditionConfig public final String authority; public final int[] 
ports; - public TcpConditionConfig( + public static TcpConditionConfigBuilder builder() + { + return new TcpConditionConfigBuilder<>(TcpConditionConfig.class::cast); + } + + public static TcpConditionConfigBuilder builder( + Function mapper) + { + return new TcpConditionConfigBuilder<>(mapper); + } + + TcpConditionConfig( String cidr, String authority, int[] ports) diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfigBuilder.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfigBuilder.java new file mode 100644 index 0000000000..d1dd01bdb8 --- /dev/null +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfigBuilder.java @@ -0,0 +1,63 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.tcp.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConditionConfig; +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public final class TcpConditionConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String cidr; + private String authority; + private int[] ports; + + TcpConditionConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public TcpConditionConfigBuilder cidr( + String cidr) + { + this.cidr = cidr; + return this; + } + + public TcpConditionConfigBuilder authority( + String authority) + { + this.authority = authority; + return this; + } + + public TcpConditionConfigBuilder ports( + int[] ports) + { + this.ports = ports; + return this; + } + + @Override + public T build() + { + return mapper.apply(new TcpConditionConfig(cidr, authority, ports)); + } +} diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfig.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfig.java index 97cd0e7e44..20427f1024 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfig.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfig.java @@ -15,6 +15,8 @@ */ package io.aklivity.zilla.runtime.binding.tcp.config; +import java.util.function.Function; + import io.aklivity.zilla.runtime.engine.config.OptionsConfig; public final class TcpOptionsConfig extends OptionsConfig @@ -25,7 +27,18 @@ public final class TcpOptionsConfig extends OptionsConfig public final boolean nodelay; public final boolean keepalive; - public TcpOptionsConfig( + public static TcpOptionsConfigBuilder builder() + { + return new TcpOptionsConfigBuilder<>(TcpOptionsConfig.class::cast); + } + + public static TcpOptionsConfigBuilder builder( + Function 
mapper) + { + return new TcpOptionsConfigBuilder<>(mapper); + } + + TcpOptionsConfig( String host, int[] ports, int backlog, diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfigBuilder.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfigBuilder.java new file mode 100644 index 0000000000..fedbda81f3 --- /dev/null +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfigBuilder.java @@ -0,0 +1,83 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.tcp.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; + +public final class TcpOptionsConfigBuilder implements ConfigBuilder +{ + public static final int BACKLOG_DEFAULT = 0; + public static final boolean NODELAY_DEFAULT = true; + public static final boolean KEEPALIVE_DEFAULT = false; + + private final Function mapper; + + private String host; + private int[] ports; + private int backlog = BACKLOG_DEFAULT; + private boolean nodelay = NODELAY_DEFAULT; + private boolean keepalive = KEEPALIVE_DEFAULT; + + TcpOptionsConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public TcpOptionsConfigBuilder host( + String host) + { + this.host = host; + return this; + } + + public TcpOptionsConfigBuilder ports( + int[] ports) + { + this.ports = ports; + return this; + } + + public TcpOptionsConfigBuilder backlog( + int backlog) + { + this.backlog = backlog; + return this; + } + + public TcpOptionsConfigBuilder nodelay( + boolean nodelay) + { + this.nodelay = nodelay; + return this; + } + + public TcpOptionsConfigBuilder keepalive( + boolean keepalive) + { + this.keepalive = keepalive; + return this; + } + + @Override + public T build() + { + return mapper.apply(new TcpOptionsConfig(host, ports, backlog, nodelay, keepalive)); + } +} diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpBindingConfig.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpBindingConfig.java index e88948dd43..3f6e5e0276 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpBindingConfig.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpBindingConfig.java @@ -70,7 +70,7 @@ public TcpRouteConfig resolve( private static 
List initDefaultClientRoutes() { - final RouteConfig route = new RouteConfig(null); + final RouteConfig route = RouteConfig.builder().build(); route.authorized = id -> true; return singletonList(new TcpRouteConfig(route)); diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapter.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapter.java index d53476fc8f..3c5da83525 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapter.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapter.java @@ -29,6 +29,7 @@ import org.agrona.collections.MutableInteger; import io.aklivity.zilla.runtime.binding.tcp.config.TcpConditionConfig; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpConditionConfigBuilder; import io.aklivity.zilla.runtime.binding.tcp.internal.TcpBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; @@ -88,14 +89,22 @@ public JsonObject adaptToJson( public ConditionConfig adaptFromJson( JsonObject object) { - String cidr = object.containsKey(CIDR_NAME) ? object.getString(CIDR_NAME) : null; - String authority = object.containsKey(AUTHORITY_NAME) ? object.getString(AUTHORITY_NAME) : null; - JsonValue portsValue = object.containsKey(PORT_NAME) ? 
object.get(PORT_NAME) : null; + TcpConditionConfigBuilder tcpCondition = TcpConditionConfig.builder(); - int[] ports = null; + if (object.containsKey(CIDR_NAME)) + { + tcpCondition.cidr(object.getString(CIDR_NAME)); + } - if (portsValue != null) + if (object.containsKey(AUTHORITY_NAME)) { + tcpCondition.authority(object.getString(AUTHORITY_NAME)); + } + + if (object.containsKey(PORT_NAME)) + { + JsonValue portsValue = object.get(PORT_NAME); + IntHashSet portsSet = new IntHashSet(); switch (portsValue.getValueType()) { @@ -108,12 +117,13 @@ public ConditionConfig adaptFromJson( break; } - int[] ports0 = new int[portsSet.size()]; + int[] ports = new int[portsSet.size()]; MutableInteger index = new MutableInteger(); - portsSet.forEach(i -> ports0[index.value++] = i); - ports = ports0; + portsSet.forEach(i -> ports[index.value++] = i); + + tcpCondition.ports(ports); } - return new TcpConditionConfig(cidr, authority, ports); + return tcpCondition.build(); } } diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapter.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapter.java index b6dd8ed617..ed8fa00d99 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapter.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapter.java @@ -15,6 +15,10 @@ */ package io.aklivity.zilla.runtime.binding.tcp.internal.config; +import static io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfigBuilder.BACKLOG_DEFAULT; +import static io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfigBuilder.KEEPALIVE_DEFAULT; +import static io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfigBuilder.NODELAY_DEFAULT; + import java.util.stream.IntStream; import jakarta.json.Json; @@ -31,6 +35,7 @@ import 
org.agrona.collections.MutableInteger; import io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfig; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfigBuilder; import io.aklivity.zilla.runtime.binding.tcp.internal.TcpBinding; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; @@ -41,10 +46,6 @@ public final class TcpOptionsConfigAdapter implements OptionsConfigAdapterSpi, J private static final String PORT_NAME = "port"; private static final String BACKLOG_NAME = "backlog"; - private static final int BACKLOG_DEFAULT = 0; - private static final boolean NODELAY_DEFAULT = true; - private static final boolean KEEPALIVE_DEFAULT = false; - @Override public Kind kind() { @@ -100,14 +101,12 @@ public JsonObject adaptToJson( public OptionsConfig adaptFromJson( JsonObject object) { - String host = object.getString(HOST_NAME); - JsonValue portsValue = object.get(PORT_NAME); - int backlog = object.containsKey(BACKLOG_NAME) ? 
object.getJsonNumber(BACKLOG_NAME).intValue() : BACKLOG_DEFAULT; - boolean nodelay = NODELAY_DEFAULT; - boolean keepalive = KEEPALIVE_DEFAULT; + final TcpOptionsConfigBuilder tcpOptions = TcpOptionsConfig.builder(); - IntHashSet portsSet = new IntHashSet(); + tcpOptions.host(object.getString(HOST_NAME)); + JsonValue portsValue = object.get(PORT_NAME); + IntHashSet portsSet = new IntHashSet(); switch (portsValue.getValueType()) { case ARRAY: @@ -122,8 +121,14 @@ public OptionsConfig adaptFromJson( int[] ports = new int[portsSet.size()]; MutableInteger index = new MutableInteger(); portsSet.forEach(i -> ports[index.value++] = i); + tcpOptions.ports(ports); + + if (object.containsKey(BACKLOG_NAME)) + { + tcpOptions.backlog(object.getJsonNumber(BACKLOG_NAME).intValue()); + } - return new TcpOptionsConfig(host, ports, backlog, nodelay, keepalive); + return tcpOptions.build(); } static void adaptPortsValueFromJson( diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapterTest.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapterTest.java index 25d3ac237e..75f25e5f29 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapterTest.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapterTest.java @@ -64,13 +64,18 @@ public void shouldReadCondition() @Test public void shouldWriteCondition() { - TcpConditionConfig condition = new TcpConditionConfig("127.0.0.0/24", "*.example.net", new int[] { 8080 }); + TcpConditionConfig condition = TcpConditionConfig.builder() + .cidr("127.0.0.0/24") + .authority("*.example.net") + .ports(new int[] { 8080 }) + .build(); String text = jsonb.toJson(condition); assertThat(text, not(nullValue())); assertThat(text, 
equalTo("{\"cidr\":\"127.0.0.0/24\",\"authority\":\"*.example.net\",\"port\":8080}")); } + @Test public void shouldReadConditionWithPortRange() { diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapterTest.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapterTest.java index 33f9c16035..47d6b16d3f 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapterTest.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapterTest.java @@ -99,7 +99,10 @@ public void shouldReadOptionsWithPortRangeSingleton() @Test public void shouldWriteOptions() { - TcpOptionsConfig options = new TcpOptionsConfig("localhost", new int[] { 8080 }, 0, true, false); + TcpOptionsConfig options = TcpOptionsConfig.builder() + .host("localhost") + .ports(new int[] { 8080 }) + .build(); String text = jsonb.toJson(options); @@ -130,7 +133,11 @@ public void shouldReadOptionsWithBacklog() @Test public void shouldWriteOptionsWithBacklog() { - TcpOptionsConfig options = new TcpOptionsConfig("localhost", new int[] { 8080 }, 1000, true, false); + TcpOptionsConfig options = TcpOptionsConfig.builder() + .host("localhost") + .ports(new int[] { 8080 }) + .backlog(1000) + .build(); String text = jsonb.toJson(options); diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfig.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfig.java index 79694818f5..473a7f90f2 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfig.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfig.java @@ -15,6 +15,8 @@ */ package 
io.aklivity.zilla.runtime.binding.tls.config; +import java.util.function.Function; + import io.aklivity.zilla.runtime.engine.config.ConditionConfig; public final class TlsConditionConfig extends ConditionConfig @@ -22,7 +24,18 @@ public final class TlsConditionConfig extends ConditionConfig public final String authority; public final String alpn; - public TlsConditionConfig( + public static TlsConditionConfigBuilder builder() + { + return new TlsConditionConfigBuilder<>(TlsConditionConfig.class::cast); + } + + public static TlsConditionConfigBuilder builder( + Function mapper) + { + return new TlsConditionConfigBuilder<>(mapper); + } + + TlsConditionConfig( String authority, String alpn) { diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfigBuilder.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfigBuilder.java new file mode 100644 index 0000000000..4d6ee3820b --- /dev/null +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfigBuilder.java @@ -0,0 +1,55 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.tls.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConditionConfig; +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public final class TlsConditionConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String authority; + private String alpn; + + TlsConditionConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public TlsConditionConfigBuilder authority( + String authority) + { + this.authority = authority; + return this; + } + + public TlsConditionConfigBuilder alpn( + String alpn) + { + this.alpn = alpn; + return this; + } + + @Override + public T build() + { + return mapper.apply(new TlsConditionConfig(authority, alpn)); + } +} diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsMutual.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsMutualConfig.java similarity index 96% rename from runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsMutual.java rename to runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsMutualConfig.java index 94d623fac1..03d153d3ff 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsMutual.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsMutualConfig.java @@ -15,7 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.tls.config; -public enum TlsMutual +public enum TlsMutualConfig { NONE, REQUESTED, diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfig.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfig.java index eeff36b470..c89313fe66 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfig.java 
+++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfig.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.binding.tls.config; import java.util.List; +import java.util.function.Function; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; @@ -26,17 +27,28 @@ public final class TlsOptionsConfig extends OptionsConfig public final List trust; public final List sni; public final List alpn; - public final TlsMutual mutual; + public final TlsMutualConfig mutual; public final List signers; public final boolean trustcacerts; - public TlsOptionsConfig( + public static TlsOptionsConfigBuilder builder() + { + return new TlsOptionsConfigBuilder<>(TlsOptionsConfig.class::cast); + } + + public static TlsOptionsConfigBuilder builder( + Function mapper) + { + return new TlsOptionsConfigBuilder<>(mapper); + } + + TlsOptionsConfig( String version, List keys, List trust, List sni, List alpn, - TlsMutual mutual, + TlsMutualConfig mutual, List signers, boolean trustcacerts) { diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfigBuilder.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfigBuilder.java new file mode 100644 index 0000000000..498d59321a --- /dev/null +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfigBuilder.java @@ -0,0 +1,107 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.tls.config; + +import static io.aklivity.zilla.runtime.binding.tls.config.TlsMutualConfig.REQUIRED; + +import java.util.List; +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; + +public final class TlsOptionsConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String version; + private List keys; + private List trust; + private List sni; + private List alpn; + private TlsMutualConfig mutual; + private List signers; + private boolean trustcacerts; + + TlsOptionsConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public TlsOptionsConfigBuilder version( + String version) + { + this.version = version; + return this; + } + + public TlsOptionsConfigBuilder keys( + List keys) + { + this.keys = keys; + return this; + } + + public TlsOptionsConfigBuilder trust( + List trust) + { + this.trust = trust; + return this; + } + + public TlsOptionsConfigBuilder sni( + List sni) + { + this.sni = sni; + return this; + } + + public TlsOptionsConfigBuilder alpn( + List alpn) + { + this.alpn = alpn; + return this; + } + + public TlsOptionsConfigBuilder mutual( + TlsMutualConfig mutual) + { + this.mutual = mutual; + return this; + } + + public TlsOptionsConfigBuilder signers( + List signers) + { + this.signers = signers; + return this; + } + + public TlsOptionsConfigBuilder trustcacerts( + boolean trustcacerts) + { + this.trustcacerts = trustcacerts; + return this; + } + + @Override + public T build() + { + TlsMutualConfig mutual = this.mutual == null && this.trust != null ? 
REQUIRED : this.mutual; + return mapper.apply(new TlsOptionsConfig(version, keys, trust, sni, alpn, mutual, signers, trustcacerts)); + } +} diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsBindingConfig.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsBindingConfig.java index 19e59b54b4..6fb1b5a0aa 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsBindingConfig.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsBindingConfig.java @@ -49,7 +49,7 @@ import org.agrona.LangUtil; -import io.aklivity.zilla.runtime.binding.tls.config.TlsMutual; +import io.aklivity.zilla.runtime.binding.tls.config.TlsMutualConfig; import io.aklivity.zilla.runtime.binding.tls.config.TlsOptionsConfig; import io.aklivity.zilla.runtime.binding.tls.internal.TlsConfiguration; import io.aklivity.zilla.runtime.binding.tls.internal.identity.TlsClientX509ExtendedKeyManager; @@ -244,7 +244,7 @@ public SSLEngine newServerEngine( engine = context.createSSLEngine(); engine.setUseClientMode(false); - TlsMutual mutual = Optional.ofNullable(options != null ? options.mutual : null).orElse(TlsMutual.NONE); + TlsMutualConfig mutual = Optional.ofNullable(options != null ? 
options.mutual : null).orElse(TlsMutualConfig.NONE); switch (mutual) { diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapter.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapter.java index 473a5736f4..0a6171a29a 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapter.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapter.java @@ -21,6 +21,7 @@ import jakarta.json.bind.adapter.JsonbAdapter; import io.aklivity.zilla.runtime.binding.tls.config.TlsConditionConfig; +import io.aklivity.zilla.runtime.binding.tls.config.TlsConditionConfigBuilder; import io.aklivity.zilla.runtime.binding.tls.internal.TlsBinding; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; @@ -61,13 +62,18 @@ public JsonObject adaptToJson( public ConditionConfig adaptFromJson( JsonObject object) { - String authority = object.containsKey(AUTHORITY_NAME) - ? object.getString(AUTHORITY_NAME) - : null; - String alpn = object.containsKey(ALPN_NAME) - ? 
object.getString(ALPN_NAME) - : null; + TlsConditionConfigBuilder tlsCondition = TlsConditionConfig.builder(); - return new TlsConditionConfig(authority, alpn); + if (object.containsKey(AUTHORITY_NAME)) + { + tlsCondition.authority(object.getString(AUTHORITY_NAME)); + } + + if (object.containsKey(ALPN_NAME)) + { + tlsCondition.alpn(object.getString(ALPN_NAME)); + } + + return tlsCondition.build(); } } diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapter.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapter.java index fb869c2be7..d61e50819d 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapter.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapter.java @@ -15,7 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.tls.internal.config; -import static io.aklivity.zilla.runtime.binding.tls.config.TlsMutual.REQUIRED; +import static io.aklivity.zilla.runtime.binding.tls.config.TlsMutualConfig.REQUIRED; import static java.util.stream.Collectors.toList; import java.util.List; @@ -29,8 +29,9 @@ import jakarta.json.JsonValue; import jakarta.json.bind.adapter.JsonbAdapter; -import io.aklivity.zilla.runtime.binding.tls.config.TlsMutual; +import io.aklivity.zilla.runtime.binding.tls.config.TlsMutualConfig; import io.aklivity.zilla.runtime.binding.tls.config.TlsOptionsConfig; +import io.aklivity.zilla.runtime.binding.tls.config.TlsOptionsConfigBuilder; import io.aklivity.zilla.runtime.binding.tls.internal.TlsBinding; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; @@ -105,7 +106,7 @@ public JsonObject adaptToJson( } if (tlsOptions.mutual != null && - (tlsOptions.mutual != REQUIRED || tlsOptions.trust != null)) + 
(tlsOptions.trust == null || tlsOptions.mutual != REQUIRED)) { String mutual = tlsOptions.mutual.name().toLowerCase(); object.add(MUTUAL_NAME, mutual); @@ -125,32 +126,49 @@ public JsonObject adaptToJson( public OptionsConfig adaptFromJson( JsonObject object) { - String version = object.containsKey(VERSION_NAME) - ? object.getString(VERSION_NAME) - : null; - List keys = object.containsKey(KEYS_NAME) - ? asListString(object.getJsonArray(KEYS_NAME)) - : null; - List trust = object.containsKey(TRUST_NAME) - ? asListString(object.getJsonArray(TRUST_NAME)) - : null; - boolean trustcacerts = object.containsKey(TRUSTCACERTS_NAME) - ? object.getBoolean(TRUSTCACERTS_NAME) - : false; - List sni = object.containsKey(SNI_NAME) - ? asListString(object.getJsonArray(SNI_NAME)) - : null; - List alpn = object.containsKey(ALPN_NAME) - ? asListString(object.getJsonArray(ALPN_NAME)) - : null; - TlsMutual mutual = object.containsKey(MUTUAL_NAME) - ? TlsMutual.valueOf(object.getString(MUTUAL_NAME).toUpperCase()) - : trust != null ? REQUIRED : null; - List signers = object.containsKey(SIGNERS_NAME) - ? 
asListString(object.getJsonArray(SIGNERS_NAME)) - : null; - - return new TlsOptionsConfig(version, keys, trust, sni, alpn, mutual, signers, trustcacerts); + TlsOptionsConfigBuilder tlsOptions = TlsOptionsConfig.builder(); + + if (object.containsKey(VERSION_NAME)) + { + tlsOptions.version(object.getString(VERSION_NAME)); + } + + if (object.containsKey(KEYS_NAME)) + { + tlsOptions.keys(asListString(object.getJsonArray(KEYS_NAME))); + } + + if (object.containsKey(TRUST_NAME)) + { + tlsOptions.trust(asListString(object.getJsonArray(TRUST_NAME))); + } + + if (object.containsKey(TRUSTCACERTS_NAME)) + { + tlsOptions.trustcacerts(object.getBoolean(TRUSTCACERTS_NAME)); + } + + if (object.containsKey(SNI_NAME)) + { + tlsOptions.sni(asListString(object.getJsonArray(SNI_NAME))); + } + + if (object.containsKey(ALPN_NAME)) + { + tlsOptions.alpn(asListString(object.getJsonArray(ALPN_NAME))); + } + + if (object.containsKey(MUTUAL_NAME)) + { + tlsOptions.mutual(TlsMutualConfig.valueOf(object.getString(MUTUAL_NAME).toUpperCase())); + } + + if (object.containsKey(SIGNERS_NAME)) + { + tlsOptions.signers(asListString(object.getJsonArray(SIGNERS_NAME))); + } + + return tlsOptions.build(); } private static List asListString( diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapterTest.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapterTest.java index 02555c813b..537883ad09 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapterTest.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapterTest.java @@ -60,7 +60,10 @@ public void shouldReadCondition() @Test public void shouldWriteCondition() { - TlsConditionConfig condition = new TlsConditionConfig("example.net", "echo"); + TlsConditionConfig condition = 
TlsConditionConfig.builder() + .authority("example.net") + .alpn("echo") + .build(); String text = jsonb.toJson(condition); diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapterTest.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapterTest.java index 256ec74c47..4f88b714e5 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapterTest.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapterTest.java @@ -15,7 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.tls.internal.config; -import static io.aklivity.zilla.runtime.binding.tls.config.TlsMutual.REQUESTED; +import static io.aklivity.zilla.runtime.binding.tls.config.TlsMutualConfig.REQUESTED; import static java.util.Arrays.asList; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; @@ -60,7 +60,9 @@ public void shouldReadOptions() @Test public void shouldWriteOptions() { - TlsOptionsConfig options = new TlsOptionsConfig("TLSv1.2", null, null, null, null, null, null, false); + TlsOptionsConfig options = TlsOptionsConfig.builder() + .version("TLSv1.2") + .build(); String text = jsonb.toJson(options); @@ -85,7 +87,9 @@ public void shouldReadOptionsWithKeys() @Test public void shouldWriteOptionsWithKeys() { - TlsOptionsConfig options = new TlsOptionsConfig(null, asList("localhost"), null, null, null, null, null, false); + TlsOptionsConfig options = TlsOptionsConfig.builder() + .keys(asList("localhost")) + .build(); String text = jsonb.toJson(options); @@ -110,7 +114,9 @@ public void shouldReadOptionsWithTrust() @Test public void shouldWriteOptionsWithTrust() { - TlsOptionsConfig options = new TlsOptionsConfig(null, null, asList("serverca"), null, null, null, null, false); + TlsOptionsConfig options = 
TlsOptionsConfig.builder() + .trust(asList("serverca")) + .build(); String text = jsonb.toJson(options); @@ -135,7 +141,9 @@ public void shouldReadOptionsWithTrustcacerts() @Test public void shouldWriteOptionsWithTrustcacerts() { - TlsOptionsConfig options = new TlsOptionsConfig(null, null, null, null, null, null, null, true); + TlsOptionsConfig options = TlsOptionsConfig.builder() + .trustcacerts(true) + .build(); String text = jsonb.toJson(options); @@ -160,7 +168,9 @@ public void shouldReadOptionsWithServerName() @Test public void shouldWriteOptionsWithServerName() { - TlsOptionsConfig options = new TlsOptionsConfig(null, null, null, asList("example.net"), null, null, null, false); + TlsOptionsConfig options = TlsOptionsConfig.builder() + .sni(asList("example.net")) + .build(); String text = jsonb.toJson(options); @@ -185,7 +195,9 @@ public void shouldReadOptionsWithAlpn() @Test public void shouldWriteOptionsWithAlpn() { - TlsOptionsConfig options = new TlsOptionsConfig(null, null, null, null, asList("echo"), null, null, false); + TlsOptionsConfig options = TlsOptionsConfig.builder() + .alpn(asList("echo")) + .build(); String text = jsonb.toJson(options); @@ -210,7 +222,9 @@ public void shouldReadOptionsWithMutual() @Test public void shouldWriteOptionsWithMutual() { - TlsOptionsConfig options = new TlsOptionsConfig(null, null, null, null, null, REQUESTED, null, false); + TlsOptionsConfig options = TlsOptionsConfig.builder() + .mutual(REQUESTED) + .build(); String text = jsonb.toJson(options); @@ -235,8 +249,9 @@ public void shouldReadOptionsWithSigners() @Test public void shouldWriteOptionsWithSigners() { - TlsOptionsConfig options = - new TlsOptionsConfig(null, null, null, null, null, null, asList("clientca"), false); + TlsOptionsConfig options = TlsOptionsConfig.builder() + .signers(asList("clientca")) + .build(); String text = jsonb.toJson(options); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/AttributeConfig.java 
b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/AttributeConfig.java index db8b662fc1..66b9f2184e 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/AttributeConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/AttributeConfig.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.engine.config; import static java.util.Objects.requireNonNull; +import static java.util.function.Function.identity; public class AttributeConfig { @@ -24,7 +25,12 @@ public class AttributeConfig public final String name; public final String value; - public AttributeConfig( + public static AttributeConfigBuilder builder() + { + return new AttributeConfigBuilder<>(identity()); + } + + AttributeConfig( String name, String value) { diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/AttributeConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/AttributeConfigBuilder.java new file mode 100644 index 0000000000..0eea7cd59c --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/AttributeConfigBuilder.java @@ -0,0 +1,51 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.config; + +import java.util.function.Function; + +public final class AttributeConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String name; + private String value; + + AttributeConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public AttributeConfigBuilder name( + String name) + { + this.name = name; + return this; + } + + public AttributeConfigBuilder value( + String value) + { + this.value = value; + return this; + } + + public T build() + { + return mapper.apply(new AttributeConfig(name, value)); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfig.java index d5a597db6f..1ce100f0b9 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfig.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.engine.config; import static java.util.Objects.requireNonNull; +import static java.util.function.Function.identity; import java.util.List; import java.util.function.ToLongFunction; @@ -39,7 +40,12 @@ public class BindingConfig public final List routes; public final TelemetryRefConfig telemetryRef; - public BindingConfig( + public static BindingConfigBuilder builder() + { + return new BindingConfigBuilder<>(identity()); + } + + BindingConfig( String vault, String name, String type, diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java new file mode 100644 index 0000000000..00a7d7bdfd --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java @@ -0,0 +1,135 @@ +/* + * Copyright 2021-2023 Aklivity Inc. 
+ * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.config; + +import static java.util.Collections.emptyList; + +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; +import java.util.function.Function; + +public final class BindingConfigBuilder implements ConfigBuilder +{ + public static final List ROUTES_DEFAULT = emptyList(); + + private final Function mapper; + + private String vault; + private String name; + private String type; + private KindConfig kind; + private String entry; + private OptionsConfig options; + private List routes; + private TelemetryRefConfig telemetry; + + BindingConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public BindingConfigBuilder vault( + String vault) + { + this.vault = vault; + return this; + } + + public BindingConfigBuilder name( + String name) + { + this.name = name; + return this; + } + + public BindingConfigBuilder type( + String type) + { + this.type = type; + return this; + } + + public BindingConfigBuilder kind( + KindConfig kind) + { + this.kind = kind; + return this; + } + + public BindingConfigBuilder entry( + String entry) + { + this.entry = entry; + return this; + } + + public >> C options( + Function>, C> options) + { + return options.apply(this::options); + } + + public BindingConfigBuilder options( + OptionsConfig options) + { + this.options = options; + return this; + } + + public 
RouteConfigBuilder> route() + { + return new RouteConfigBuilder<>(this::route); + } + + public BindingConfigBuilder route( + RouteConfig route) + { + if (routes == null) + { + routes = new LinkedList<>(); + } + routes.add(route); + return this; + } + + public TelemetryRefConfigBuilder> telemetry() + { + return new TelemetryRefConfigBuilder<>(this::telemetry); + } + + public BindingConfigBuilder telemetry( + TelemetryRefConfig telemetry) + { + this.telemetry = telemetry; + return this; + } + + @Override + public T build() + { + return mapper.apply(new BindingConfig( + vault, + name, + type, + kind, + entry, + options, + Optional.ofNullable(routes).orElse(ROUTES_DEFAULT), + telemetry)); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigBuilder.java new file mode 100644 index 0000000000..12491b5d63 --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigBuilder.java @@ -0,0 +1,21 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.config; + +public interface ConfigBuilder +{ + T build(); +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigReader.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigReader.java index 6cd6708943..8e22e20cf5 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigReader.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigReader.java @@ -111,7 +111,7 @@ public NamespaceConfig read( JsonSchema schema = new UniquePropertyKeysSchema(validator.read()); JsonProvider provider = service.createJsonProvider(schema, parser -> handler); - //provider.createReader(reader).read(); + provider.createReader(reader).read(); if (!errors.isEmpty()) { diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigWriter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigWriter.java index ccce543404..ce08b2acae 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigWriter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigWriter.java @@ -15,6 +15,8 @@ */ package io.aklivity.zilla.runtime.engine.config; +import static com.fasterxml.jackson.dataformat.yaml.YAMLGenerator.Feature.MINIMIZE_QUOTES; +import static com.fasterxml.jackson.dataformat.yaml.YAMLGenerator.Feature.WRITE_DOC_START_MARKER; import static org.agrona.LangUtil.rethrowUnchecked; import java.io.StringWriter; @@ -80,7 +82,11 @@ private void write0( String jsonText = jsonb.toJson(namespace, NamespaceConfig.class); JsonNode json = new ObjectMapper().readTree(jsonText); - new YAMLMapper().writeValue(writer, json); + YAMLMapper mapper = YAMLMapper.builder() + .disable(WRITE_DOC_START_MARKER) + .enable(MINIMIZE_QUOTES) + .build(); + mapper.writeValue(writer, json); if (!errors.isEmpty()) { diff --git 
a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ExporterConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ExporterConfig.java index a40e4d7c56..20777fe25d 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ExporterConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ExporterConfig.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.engine.config; import static java.util.Objects.requireNonNull; +import static java.util.function.Function.identity; public class ExporterConfig { @@ -25,7 +26,12 @@ public class ExporterConfig public transient long id; - public ExporterConfig( + public static ExporterConfigBuilder builder() + { + return new ExporterConfigBuilder<>(identity()); + } + + ExporterConfig( String name, String type, OptionsConfig options) diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ExporterConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ExporterConfigBuilder.java new file mode 100644 index 0000000000..eeb3bf5503 --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ExporterConfigBuilder.java @@ -0,0 +1,66 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.config; + +import java.util.function.Function; + +public final class ExporterConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String name; + private String type; + private OptionsConfig options; + + ExporterConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public ExporterConfigBuilder name( + String name) + { + this.name = name; + return this; + } + + public ExporterConfigBuilder type( + String type) + { + this.type = type; + return this; + } + + public >> C options( + Function>, C> options) + { + return options.apply(this::options); + } + + public ExporterConfigBuilder options( + OptionsConfig options) + { + this.options = options; + return this; + } + + @Override + public T build() + { + return mapper.apply(new ExporterConfig(name, type, options)); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardConfig.java index dacd3922da..fcd93b0055 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardConfig.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.engine.config; import static java.util.Objects.requireNonNull; +import static java.util.function.Function.identity; import java.util.function.Function; @@ -28,7 +29,12 @@ public class GuardConfig public final String type; public final OptionsConfig options; - public GuardConfig( + public static final GuardConfigBuilder builder() + { + return new GuardConfigBuilder<>(identity()); + } + + GuardConfig( String name, String type, OptionsConfig options) diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardConfigBuilder.java new file mode 
100644 index 0000000000..ff1e64dea1 --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardConfigBuilder.java @@ -0,0 +1,66 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.config; + +import java.util.function.Function; + +public final class GuardConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String name; + private String type; + private OptionsConfig options; + + GuardConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public GuardConfigBuilder name( + String name) + { + this.name = name; + return this; + } + + public GuardConfigBuilder type( + String type) + { + this.type = type; + return this; + } + + public >> C options( + Function>, C> options) + { + return options.apply(this::options); + } + + public GuardConfigBuilder options( + OptionsConfig options) + { + this.options = options; + return this; + } + + @Override + public T build() + { + return mapper.apply(new GuardConfig(name, type, options)); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardedConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardedConfig.java index dad66c3204..4c0f91c419 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardedConfig.java +++ 
b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardedConfig.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.engine.config; import static java.util.Objects.requireNonNull; +import static java.util.function.Function.identity; import java.util.List; import java.util.function.LongFunction; @@ -28,7 +29,12 @@ public class GuardedConfig public final String name; public final List roles; - public GuardedConfig( + public static GuardedConfigBuilder builder() + { + return new GuardedConfigBuilder<>(identity()); + } + + GuardedConfig( String name, List roles) { diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardedConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardedConfigBuilder.java new file mode 100644 index 0000000000..bccffad9ec --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardedConfigBuilder.java @@ -0,0 +1,65 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.config; + +import static java.util.Collections.emptyList; + +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; +import java.util.function.Function; + +public final class GuardedConfigBuilder implements ConfigBuilder +{ + public static final List ROLES_DEFAULT = emptyList(); + + private final Function mapper; + + private String name; + private List roles; + + GuardedConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public GuardedConfigBuilder name( + String name) + { + this.name = name; + return this; + } + + public GuardedConfigBuilder role( + String role) + { + if (roles == null) + { + roles = new LinkedList<>(); + } + roles.add(role); + return this; + } + + @Override + public T build() + { + return mapper.apply(new GuardedConfig( + name, + Optional.ofNullable(roles).orElse(ROLES_DEFAULT))); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricConfig.java index 3240ca3c54..2ad112c7e4 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricConfig.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.engine.config; import static java.util.Objects.requireNonNull; +import static java.util.function.Function.identity; public class MetricConfig { @@ -24,7 +25,12 @@ public class MetricConfig public transient long id; - public MetricConfig( + public static MetricConfigBuilder builder() + { + return new MetricConfigBuilder<>(identity()); + } + + MetricConfig( String group, String name) { diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricConfigBuilder.java new file mode 100644 index 
0000000000..960d644931 --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricConfigBuilder.java @@ -0,0 +1,52 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.config; + +import java.util.function.Function; + +public final class MetricConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String group; + private String name; + + MetricConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public MetricConfigBuilder group( + String group) + { + this.group = group; + return this; + } + + public MetricConfigBuilder name( + String name) + { + this.name = name; + return this; + } + + @Override + public T build() + { + return mapper.apply(new MetricConfig(group, name)); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricRefConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricRefConfig.java index 90339cb346..da6653c87b 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricRefConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricRefConfig.java @@ -16,12 +16,18 @@ package io.aklivity.zilla.runtime.engine.config; import static java.util.Objects.requireNonNull; +import static java.util.function.Function.identity; 
public class MetricRefConfig { public final String name; - public MetricRefConfig( + public static MetricRefConfigBuilder builder() + { + return new MetricRefConfigBuilder<>(identity()); + } + + MetricRefConfig( String name) { this.name = requireNonNull(name); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricRefConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricRefConfigBuilder.java new file mode 100644 index 0000000000..c114c7400b --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricRefConfigBuilder.java @@ -0,0 +1,43 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.config; + +import java.util.function.Function; + +public final class MetricRefConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String name; + + MetricRefConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public MetricRefConfigBuilder name( + String name) + { + this.name = name; + return this; + } + + public T build() + { + return mapper.apply(new MetricRefConfig(name)); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceConfig.java index 118857c1d9..afe2322cf2 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceConfig.java @@ -16,13 +16,12 @@ package io.aklivity.zilla.runtime.engine.config; import static java.util.Objects.requireNonNull; +import static java.util.function.Function.identity; import java.util.List; import java.util.function.Function; import java.util.function.ToLongFunction; -import io.aklivity.zilla.runtime.engine.internal.config.NamespaceRef; - public class NamespaceConfig { public transient int id; @@ -30,15 +29,20 @@ public class NamespaceConfig public transient Function readURL; public final String name; - public final List references; + public final List references; public final TelemetryConfig telemetry; public final List bindings; public final List guards; public final List vaults; - public NamespaceConfig( + public static NamespaceConfigBuilder builder() + { + return new NamespaceConfigBuilder<>(identity()); + } + + NamespaceConfig( String name, - List references, + List references, TelemetryConfig telemetry, List bindings, List guards, diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceConfigBuilder.java 
b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceConfigBuilder.java new file mode 100644 index 0000000000..29ce72beaa --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceConfigBuilder.java @@ -0,0 +1,162 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.config; + +import static java.util.Collections.emptyList; + +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; +import java.util.function.Function; + +public class NamespaceConfigBuilder +{ + public static final List NAMESPACES_DEFAULT = emptyList(); + public static final List BINDINGS_DEFAULT = emptyList(); + public static final List GUARDS_DEFAULT = emptyList(); + public static final List VAULTS_DEFAULT = emptyList(); + public static final TelemetryConfig TELEMETRY_DEFAULT = TelemetryConfig.EMPTY; + + private final Function mapper; + + private String name; + private List namespaces; + private TelemetryConfig telemetry; + private List bindings; + private List guards; + private List vaults; + + NamespaceConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public NamespaceConfigBuilder name( + String name) + { + this.name = name; + return this; + } + + public NamespaceRefConfigBuilder> namespace() + { + return new NamespaceRefConfigBuilder<>(this::namespace); + } + + 
public NamespaceConfigBuilder namespace( + NamespaceRefConfig namespace) + { + if (namespaces == null) + { + namespaces = new LinkedList<>(); + } + namespaces.add(namespace); + return this; + } + + public TelemetryConfigBuilder> telemetry() + { + return new TelemetryConfigBuilder<>(this::telemetry); + } + + public NamespaceConfigBuilder telemetry( + TelemetryConfig telemetry) + { + this.telemetry = telemetry; + return this; + } + + public BindingConfigBuilder> binding() + { + return new BindingConfigBuilder<>(this::binding); + } + + public NamespaceConfigBuilder binding( + BindingConfig binding) + { + if (bindings == null) + { + bindings = new LinkedList<>(); + } + bindings.add(binding); + return this; + } + + public NamespaceConfigBuilder bindings( + List bindings) + { + this.bindings = bindings; + return this; + } + + public GuardConfigBuilder> guard() + { + return new GuardConfigBuilder<>(this::guard); + } + + public NamespaceConfigBuilder guard( + GuardConfig guard) + { + if (guards == null) + { + guards = new LinkedList<>(); + } + guards.add(guard); + return this; + } + + public NamespaceConfigBuilder guards( + List guards) + { + this.guards = guards; + return this; + } + + public VaultConfigBuilder> vault() + { + return new VaultConfigBuilder<>(this::vault); + } + + public NamespaceConfigBuilder vault( + VaultConfig vault) + { + if (vaults == null) + { + vaults = new LinkedList<>(); + } + vaults.add(vault); + return this; + } + + public NamespaceConfigBuilder vaults( + List vaults) + { + this.vaults = vaults; + return this; + } + + public T build() + { + return mapper.apply(new NamespaceConfig( + name, + Optional.ofNullable(namespaces).orElse(NAMESPACES_DEFAULT), + Optional.ofNullable(telemetry).orElse(TELEMETRY_DEFAULT), + Optional.ofNullable(bindings).orElse(BINDINGS_DEFAULT), + Optional.ofNullable(guards).orElse(GUARDS_DEFAULT), + Optional.ofNullable(vaults).orElse(VAULTS_DEFAULT))); + } +} diff --git 
a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceRef.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceRefConfig.java similarity index 73% rename from runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceRef.java rename to runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceRefConfig.java index 7af4999159..9261dd32bc 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceRef.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceRefConfig.java @@ -13,16 +13,23 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.engine.internal.config; +package io.aklivity.zilla.runtime.engine.config; + +import static java.util.function.Function.identity; import java.util.Map; -public class NamespaceRef +public class NamespaceRefConfig { public final String name; public final Map links; - public NamespaceRef( + public static NamespaceRefConfigBuilder builder() + { + return new NamespaceRefConfigBuilder<>(identity()); + } + + NamespaceRefConfig( String name, Map links) { diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceRefConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceRefConfigBuilder.java new file mode 100644 index 0000000000..86525681a5 --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceRefConfigBuilder.java @@ -0,0 +1,67 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.config; + +import static java.util.Collections.emptyMap; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Optional; +import java.util.function.Function; + +public final class NamespaceRefConfigBuilder implements ConfigBuilder +{ + public static final Map LINKS_DEFAULT = emptyMap(); + + private final Function mapper; + + private String name; + private Map links; + + NamespaceRefConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public NamespaceRefConfigBuilder name( + String name) + { + this.name = name; + return this; + } + + public NamespaceRefConfigBuilder link( + String name, + String value) + { + if (links == null) + { + links = new LinkedHashMap<>(); + } + links.put(name, value); + return this; + } + + @Override + public T build() + { + return mapper.apply(new NamespaceRefConfig( + name, + Optional.ofNullable(links).orElse(LINKS_DEFAULT))); + } + +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/RouteConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/RouteConfig.java index 1463acf76b..47b6d0511c 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/RouteConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/RouteConfig.java @@ -15,17 +15,14 @@ */ package io.aklivity.zilla.runtime.engine.config; -import static java.util.Collections.emptyList; import static java.util.Objects.requireNonNull; +import static 
java.util.function.Function.identity; import java.util.List; import java.util.function.LongPredicate; public class RouteConfig { - public static final List WHEN_DEFAULT = emptyList(); - public static final List GUARDED_DEFAULT = emptyList(); - public transient long id; public transient LongPredicate authorized; @@ -35,35 +32,12 @@ public class RouteConfig public final WithConfig with; public final List guarded; - public RouteConfig( - String exit) - { - this(0, exit); - } - - public RouteConfig( - String exit, - List guarded) - { - this(exit, WHEN_DEFAULT, guarded); - } - - public RouteConfig( - String exit, - List when, - List guarded) - { - this(0, exit, when, null, guarded); - } - - public RouteConfig( - int order, - String exit) + public static RouteConfigBuilder builder() { - this(order, exit, WHEN_DEFAULT, null, GUARDED_DEFAULT); + return new RouteConfigBuilder<>(identity()); } - public RouteConfig( + RouteConfig( int order, String exit, List when, diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/RouteConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/RouteConfigBuilder.java new file mode 100644 index 0000000000..a7c6654609 --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/RouteConfigBuilder.java @@ -0,0 +1,114 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.config; + +import static java.util.Collections.emptyList; + +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; +import java.util.function.Function; + +public final class RouteConfigBuilder implements ConfigBuilder +{ + public static final List WHEN_DEFAULT = emptyList(); + public static final List GUARDED_DEFAULT = emptyList(); + + private final Function mapper; + + private int order; + private String exit; + private List when; + private WithConfig with; + private List guarded; + + RouteConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public RouteConfigBuilder order( + int order) + { + this.order = order; + return this; + } + + public RouteConfigBuilder exit( + String exit) + { + this.exit = exit; + return this; + } + + public >> C when( + Function>, C> condition) + { + return condition.apply(this::when); + } + + public RouteConfigBuilder when( + ConditionConfig condition) + { + if (when == null) + { + when = new LinkedList<>(); + } + when.add(condition); + return this; + } + + public >> B with( + Function>, B> with) + { + return with.apply(this::with); + } + + public RouteConfigBuilder with( + WithConfig with) + { + this.with = with; + return this; + } + + public GuardedConfigBuilder> guarded() + { + return new GuardedConfigBuilder<>(this::guarded); + } + + public RouteConfigBuilder guarded( + GuardedConfig guarded) + { + if (this.guarded == null) + { + this.guarded = new LinkedList<>(); + } + this.guarded.add(guarded); + return this; + } + + @Override + public T build() + { + return mapper.apply(new RouteConfig( + order, + exit, + Optional.ofNullable(when).orElse(WHEN_DEFAULT), + with, + Optional.ofNullable(guarded).orElse(GUARDED_DEFAULT))); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryConfig.java index 
1121768419..8bf612ad38 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryConfig.java @@ -15,6 +15,8 @@ */ package io.aklivity.zilla.runtime.engine.config; +import static java.util.function.Function.identity; + import java.util.List; public class TelemetryConfig @@ -25,7 +27,12 @@ public class TelemetryConfig public final List metrics; public final List exporters; - public TelemetryConfig( + public static TelemetryConfigBuilder builder() + { + return new TelemetryConfigBuilder<>(identity()); + } + + TelemetryConfig( List attributes, List metrics, List exporters) diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryConfigBuilder.java new file mode 100644 index 0000000000..a576b68c0e --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryConfigBuilder.java @@ -0,0 +1,97 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.config; + +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; +import java.util.function.Function; + +public final class TelemetryConfigBuilder implements ConfigBuilder +{ + public static final List ATTRIBUTES_DEFAULT = List.of(); + public static final List METRICS_DEFAULT = List.of(); + public static final List EXPORTERS_DEFAULT = List.of(); + + private final Function mapper; + + private List attributes; + private List metrics; + private List exporters; + + TelemetryConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public AttributeConfigBuilder> attribute() + { + return new AttributeConfigBuilder<>(this::attribute); + } + + public TelemetryConfigBuilder attribute( + AttributeConfig attribute) + { + if (attributes == null) + { + attributes = new LinkedList<>(); + } + attributes.add(attribute); + return this; + } + + public MetricConfigBuilder> metric() + { + return new MetricConfigBuilder<>(this::metric); + } + + public TelemetryConfigBuilder metric( + MetricConfig metric) + { + if (metrics == null) + { + metrics = new LinkedList<>(); + } + metrics.add(metric); + return this; + } + + public ExporterConfigBuilder> exporter() + { + return new ExporterConfigBuilder<>(this::exporter); + } + + public TelemetryConfigBuilder exporter( + ExporterConfig exporter) + { + if (exporters == null) + { + exporters = new LinkedList<>(); + } + exporters.add(exporter); + return this; + } + + @Override + public T build() + { + return mapper.apply(new TelemetryConfig( + Optional.ofNullable(attributes).orElse(ATTRIBUTES_DEFAULT), + Optional.ofNullable(metrics).orElse(METRICS_DEFAULT), + Optional.ofNullable(exporters).orElse(EXPORTERS_DEFAULT))); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryRefConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryRefConfig.java index dde4def647..26bdfa0b1d 100644 --- 
a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryRefConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryRefConfig.java @@ -15,13 +15,20 @@ */ package io.aklivity.zilla.runtime.engine.config; +import static java.util.function.Function.identity; + import java.util.List; public class TelemetryRefConfig { public final List metricRefs; - public TelemetryRefConfig( + public static TelemetryRefConfigBuilder builder() + { + return new TelemetryRefConfigBuilder<>(identity()); + } + + TelemetryRefConfig( List metricRefs) { this.metricRefs = metricRefs; diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryRefConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryRefConfigBuilder.java new file mode 100644 index 0000000000..c59837ebb4 --- /dev/null +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryRefConfigBuilder.java @@ -0,0 +1,55 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.config; + +import java.util.LinkedList; +import java.util.List; +import java.util.function.Function; + +public final class TelemetryRefConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private List metrics; + + TelemetryRefConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public MetricRefConfigBuilder> metric() + { + return new MetricRefConfigBuilder<>(this::metric); + } + + public TelemetryRefConfigBuilder metric( + MetricRefConfig metric) + { + if (metrics == null) + { + metrics = new LinkedList<>(); + } + metrics.add(metric); + return this; + } + + @Override + public T build() + { + return mapper.apply(new TelemetryRefConfig(metrics)); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/VaultConfig.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/VaultConfig.java index bdbfaacd84..2fcc16c197 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/VaultConfig.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/VaultConfig.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.engine.config; import static java.util.Objects.requireNonNull; +import static java.util.function.Function.identity; public class VaultConfig { @@ -25,7 +26,12 @@ public class VaultConfig public final String type; public final OptionsConfig options; - public VaultConfig( + public static VaultConfigBuilder builder() + { + return new VaultConfigBuilder<>(identity()); + } + + VaultConfig( String name, String type, OptionsConfig options) diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/VaultConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/VaultConfigBuilder.java new file mode 100644 index 0000000000..66f363ebf7 --- /dev/null +++ 
b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/VaultConfigBuilder.java @@ -0,0 +1,68 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.config; + +import static java.util.Objects.requireNonNull; + +import java.util.function.Function; + +public final class VaultConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String name; + private String type; + private OptionsConfig options; + + VaultConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public VaultConfigBuilder name( + String name) + { + this.name = name; + return this; + } + + public VaultConfigBuilder type( + String type) + { + this.type = requireNonNull(type); + return this; + } + + public >> C options( + Function>, C> options) + { + return options.apply(this::options); + } + + public VaultConfigBuilder options( + OptionsConfig options) + { + this.options = options; + return this; + } + + @Override + public T build() + { + return mapper.apply(new VaultConfig(name, type, options)); + } +} diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/AttributeAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/AttributeAdapter.java index 23580e35a1..1d0b7a4fe8 100644 --- 
a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/AttributeAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/AttributeAdapter.java @@ -38,12 +38,9 @@ public Map.Entry adaptToJson( public AttributeConfig adaptFromJson( Map.Entry entry) { - return new AttributeConfig(entry.getKey(), asJsonString(entry.getValue())); - } - - private static String asJsonString( - JsonValue value) - { - return ((JsonString) value).getString(); + return AttributeConfig.builder() + .name(entry.getKey()) + .value(JsonString.class.cast(entry.getValue()).getString()) + .build(); } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapter.java index 874fa5cec7..fe056d0855 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapter.java @@ -15,10 +15,8 @@ */ package io.aklivity.zilla.runtime.engine.internal.config; -import static java.util.Collections.emptyList; -import static java.util.stream.Collectors.toList; +import static io.aklivity.zilla.runtime.engine.config.BindingConfigBuilder.ROUTES_DEFAULT; -import java.util.ArrayList; import java.util.LinkedList; import java.util.List; @@ -32,12 +30,10 @@ import org.agrona.collections.MutableInteger; import io.aklivity.zilla.runtime.engine.config.BindingConfig; +import io.aklivity.zilla.runtime.engine.config.BindingConfigBuilder; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; -import io.aklivity.zilla.runtime.engine.config.KindConfig; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; import io.aklivity.zilla.runtime.engine.config.RouteConfig; 
-import io.aklivity.zilla.runtime.engine.config.TelemetryRefConfig; public class BindingConfigsAdapter implements JsonbAdapter { @@ -50,8 +46,6 @@ public class BindingConfigsAdapter implements JsonbAdapter ROUTES_DEFAULT = emptyList(); - private final KindAdapter kind; private final RouteAdapter route; private final OptionsAdapter options; @@ -100,9 +94,20 @@ public JsonObject adaptToJson( if (!ROUTES_DEFAULT.equals(binding.routes)) { - JsonArrayBuilder routes = Json.createArrayBuilder(); - binding.routes.forEach(r -> routes.add(route.adaptToJson(r))); - item.add(ROUTES_NAME, routes); + RouteConfig lastRoute = binding.routes.get(binding.routes.size() - 1); + if (lastRoute.exit != null && + lastRoute.guarded.isEmpty() && + lastRoute.when.isEmpty() && + lastRoute.with == null) + { + item.add(EXIT_NAME, lastRoute.exit); + } + else + { + JsonArrayBuilder routes = Json.createArrayBuilder(); + binding.routes.forEach(r -> routes.add(route.adaptToJson(r))); + item.add(ROUTES_NAME, routes); + } } if (binding.telemetryRef != null) @@ -126,49 +131,56 @@ public BindingConfig[] adaptFromJson( for (String name : object.keySet()) { JsonObject item = object.getJsonObject(name); - String type = item.getString(TYPE_NAME); + String type = item.getString(TYPE_NAME); route.adaptType(type); options.adaptType(type); - String vault = item.containsKey(VAULT_NAME) - ? item.getString(VAULT_NAME) - : null; - KindConfig kind = this.kind.adaptFromJson(item.getJsonString(KIND_NAME)); - OptionsConfig opts = item.containsKey(OPTIONS_NAME) ? 
- options.adaptFromJson(item.getJsonObject(OPTIONS_NAME)) : - null; + BindingConfigBuilder binding = BindingConfig.builder() + .name(name) + .type(type) + .kind(kind.adaptFromJson(item.getJsonString(KIND_NAME))); + + if (item.containsKey(VAULT_NAME)) + { + binding.vault(item.getString(VAULT_NAME)); + } + + if (item.containsKey(OPTIONS_NAME)) + { + binding.options(options.adaptFromJson(item.getJsonObject(OPTIONS_NAME))); + } + MutableInteger order = new MutableInteger(); - List routes = item.containsKey(ROUTES_NAME) - ? item.getJsonArray(ROUTES_NAME) - .stream() - .map(JsonValue::asJsonObject) - .peek(o -> route.adaptFromJsonIndex(order.value++)) - .map(route::adaptFromJson) - .collect(toList()) - : ROUTES_DEFAULT; - - RouteConfig exit = item.containsKey(EXIT_NAME) - ? new RouteConfig(routes.size(), item.getString(EXIT_NAME)) - : null; - - if (exit != null) + if (item.containsKey(ROUTES_NAME)) + { + item.getJsonArray(ROUTES_NAME) + .stream() + .map(JsonValue::asJsonObject) + .peek(o -> route.adaptFromJsonIndex(order.value++)) + .map(route::adaptFromJson) + .forEach(binding::route); + } + + if (item.containsKey(EXIT_NAME)) { - List routesWithExit = new ArrayList<>(); - routesWithExit.addAll(routes); - routesWithExit.add(exit); - routes = routesWithExit; + binding.route() + .order(order.value++) + .exit(item.getString(EXIT_NAME)) + .build(); } - TelemetryRefConfig telemetryRef = item.containsKey(TELEMETRY_NAME) - ? this.telemetryRef.adaptFromJson(item.getJsonObject(TELEMETRY_NAME)) - : null; + if (item.containsKey(TELEMETRY_NAME)) + { + binding.telemetry(telemetryRef.adaptFromJson(item.getJsonObject(TELEMETRY_NAME))); + } - String entry = item.containsKey(ENTRY_NAME) - ? 
item.getString(ENTRY_NAME) - : null; + if (item.containsKey(ENTRY_NAME)) + { + binding.entry(item.getString(ENTRY_NAME)); + } - bindings.add(new BindingConfig(vault, name, type, kind, entry, opts, routes, telemetryRef)); + bindings.add(binding.build()); } return bindings.toArray(BindingConfig[]::new); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/ExporterAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/ExporterAdapter.java index 4f3f34fd0d..bb1ff43536 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/ExporterAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/ExporterAdapter.java @@ -25,7 +25,6 @@ import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; import io.aklivity.zilla.runtime.engine.config.ExporterConfig; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; public class ExporterAdapter implements JsonbAdapter @@ -68,10 +67,15 @@ public ExporterConfig[] adaptFromJson( for (String name : jsonObject.keySet()) { JsonObject item = jsonObject.getJsonObject(name); + String type = item.getString(TYPE_NAME); options.adaptType(type); - OptionsConfig opts = options.adaptFromJson(item.getJsonObject(OPTIONS_NAME)); - exporters.add(new ExporterConfig(name, type, opts)); + + exporters.add(ExporterConfig.builder() + .name(name) + .type(type) + .options(options.adaptFromJson(item.getJsonObject(OPTIONS_NAME))) + .build()); } return exporters.toArray(ExporterConfig[]::new); } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/GuardAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/GuardAdapter.java index 3b68d3855a..ce682615ca 100644 --- 
a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/GuardAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/GuardAdapter.java @@ -21,7 +21,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; import io.aklivity.zilla.runtime.engine.config.GuardConfig; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; +import io.aklivity.zilla.runtime.engine.config.GuardConfigBuilder; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; public class GuardAdapter @@ -62,10 +62,15 @@ public GuardConfig adaptFromJson( options.adaptType(type); - OptionsConfig opts = object.containsKey(OPTIONS_NAME) ? - options.adaptFromJson(object.getJsonObject(OPTIONS_NAME)) : - null; + GuardConfigBuilder guard = GuardConfig.builder() + .name(name) + .type(type); - return new GuardConfig(name, type, opts); + if (object.containsKey(OPTIONS_NAME)) + { + guard.options(options.adaptFromJson(object.getJsonObject(OPTIONS_NAME))); + } + + return guard.build(); } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/MetricAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/MetricAdapter.java index 8c0e67c907..aae15cdd6a 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/MetricAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/MetricAdapter.java @@ -33,17 +33,15 @@ public JsonValue adaptToJson( @Override public MetricConfig adaptFromJson( - JsonValue jsonValue) + JsonValue value) { - String name = asJsonString(jsonValue); + String name = JsonString.class.cast(value).getString(); String[] parts = name.split("\\."); String group = parts[0]; - return new MetricConfig(group, name); - } - private static String asJsonString( - JsonValue value) - { - return ((JsonString) value).getString(); + return MetricConfig.builder() + 
.group(group) + .name(name) + .build(); } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/MetricRefAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/MetricRefAdapter.java index 11cb9426b8..340dc253be 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/MetricRefAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/MetricRefAdapter.java @@ -33,14 +33,10 @@ public JsonValue adaptToJson( @Override public MetricRefConfig adaptFromJson( - JsonValue jsonValue) - { - return new MetricRefConfig(asJsonString(jsonValue)); - } - - private static String asJsonString( JsonValue value) { - return ((JsonString) value).getString(); + return MetricRefConfig.builder() + .name(JsonString.class.cast(value).getString()) + .build(); } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceAdapter.java index 658b265384..fa676849df 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceAdapter.java @@ -15,11 +15,13 @@ */ package io.aklivity.zilla.runtime.engine.internal.config; -import static java.util.Collections.emptyList; +import static io.aklivity.zilla.runtime.engine.config.NamespaceConfigBuilder.BINDINGS_DEFAULT; +import static io.aklivity.zilla.runtime.engine.config.NamespaceConfigBuilder.GUARDS_DEFAULT; +import static io.aklivity.zilla.runtime.engine.config.NamespaceConfigBuilder.NAMESPACES_DEFAULT; +import static io.aklivity.zilla.runtime.engine.config.NamespaceConfigBuilder.TELEMETRY_DEFAULT; +import static io.aklivity.zilla.runtime.engine.config.NamespaceConfigBuilder.VAULTS_DEFAULT; import java.util.Arrays; -import 
java.util.List; -import java.util.stream.Collectors; import jakarta.json.Json; import jakarta.json.JsonArrayBuilder; @@ -30,10 +32,8 @@ import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; -import io.aklivity.zilla.runtime.engine.config.GuardConfig; import io.aklivity.zilla.runtime.engine.config.NamespaceConfig; -import io.aklivity.zilla.runtime.engine.config.TelemetryConfig; -import io.aklivity.zilla.runtime.engine.config.VaultConfig; +import io.aklivity.zilla.runtime.engine.config.NamespaceConfigBuilder; public class NamespaceAdapter implements JsonbAdapter { @@ -44,13 +44,7 @@ public class NamespaceAdapter implements JsonbAdapter NAMESPACES_DEFAULT = emptyList(); - private static final List BINDINGS_DEFAULT = emptyList(); - private static final List GUARDS_DEFAULT = emptyList(); - private static final List VAULTS_DEFAULT = emptyList(); - private static final TelemetryConfig TELEMETRY_DEFAULT = TelemetryConfig.EMPTY; - - private final NamspaceRefAdapter reference; + private final NamspaceRefAdapter namespaceRef; private final TelemetryAdapter telemetry; private final BindingConfigsAdapter binding; private final VaultAdapter vault; @@ -59,7 +53,7 @@ public class NamespaceAdapter implements JsonbAdapter references.add(reference.adaptToJson(r))); + config.references.forEach(r -> references.add(namespaceRef.adaptToJson(r))); object.add(NAMESPACES_NAME, references); } @@ -113,34 +107,43 @@ public JsonObject adaptToJson( public NamespaceConfig adaptFromJson( JsonObject object) { - String name = object.getString(NAME_NAME); - List references = object.containsKey(NAMESPACES_NAME) - ? object.getJsonArray(NAMESPACES_NAME) - .stream().map(JsonValue::asJsonObject) - .map(reference::adaptFromJson) - .collect(Collectors.toList()) - : NAMESPACES_DEFAULT; - TelemetryConfig telemetry0 = object.containsKey(TELEMETRY_NAME) - ? 
telemetry.adaptFromJson(object.getJsonObject(TELEMETRY_NAME)) - : TELEMETRY_DEFAULT; - List bindings = object.containsKey(BINDINGS_NAME) - ? Arrays.asList(binding.adaptFromJson(object.getJsonObject(BINDINGS_NAME))) - : BINDINGS_DEFAULT; - List guards = object.containsKey(GUARDS_NAME) - ? object.getJsonObject(GUARDS_NAME) - .entrySet() - .stream() - .map(e -> guard.adaptFromJson(e.getKey(), e.getValue().asJsonObject())) - .collect(Collectors.toList()) - : GUARDS_DEFAULT; - List vaults = object.containsKey(VAULTS_NAME) - ? object.getJsonObject(VAULTS_NAME) - .entrySet() - .stream() - .map(e -> vault.adaptFromJson(e.getKey(), e.getValue().asJsonObject())) - .collect(Collectors.toList()) - : VAULTS_DEFAULT; - - return new NamespaceConfig(name, references, telemetry0, bindings, guards, vaults); + NamespaceConfigBuilder namespace = NamespaceConfig.builder(); + + namespace.name(object.getString(NAME_NAME)); + + if (object.containsKey(NAMESPACES_NAME)) + { + object.getJsonArray(NAMESPACES_NAME) + .stream() + .map(JsonValue::asJsonObject) + .map(namespaceRef::adaptFromJson) + .forEach(namespace::namespace); + } + + if (object.containsKey(TELEMETRY_NAME)) + { + namespace.telemetry(telemetry.adaptFromJson(object.getJsonObject(TELEMETRY_NAME))); + } + + if (object.containsKey(BINDINGS_NAME)) + { + namespace.bindings(Arrays.asList(binding.adaptFromJson(object.getJsonObject(BINDINGS_NAME)))); + } + + if (object.containsKey(GUARDS_NAME)) + { + object.getJsonObject(GUARDS_NAME).entrySet().stream() + .map(e -> guard.adaptFromJson(e.getKey(), e.getValue().asJsonObject())) + .forEach(namespace::guard); + } + + if (object.containsKey(VAULTS_NAME)) + { + object.getJsonObject(VAULTS_NAME).entrySet().stream() + .map(e -> vault.adaptFromJson(e.getKey(), e.getValue().asJsonObject())) + .forEach(namespace::vault); + } + + return namespace.build(); } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamspaceRefAdapter.java 
b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamspaceRefAdapter.java index 46288f06e9..e928964730 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamspaceRefAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/NamspaceRefAdapter.java @@ -15,27 +15,23 @@ */ package io.aklivity.zilla.runtime.engine.internal.config; -import static java.util.Collections.emptyMap; -import static java.util.stream.Collectors.toMap; - -import java.util.Map; +import static io.aklivity.zilla.runtime.engine.config.NamespaceRefConfigBuilder.LINKS_DEFAULT; import jakarta.json.Json; import jakarta.json.JsonObject; import jakarta.json.JsonObjectBuilder; import jakarta.json.JsonString; -import jakarta.json.JsonValue; import jakarta.json.bind.adapter.JsonbAdapter; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; +import io.aklivity.zilla.runtime.engine.config.NamespaceRefConfig; +import io.aklivity.zilla.runtime.engine.config.NamespaceRefConfigBuilder; -public class NamspaceRefAdapter implements JsonbAdapter +public class NamspaceRefAdapter implements JsonbAdapter { private static final String NAME_NAME = "name"; private static final String LINKS_NAME = "links"; - private static final Map LINKS_DEFAULT = emptyMap(); - public NamspaceRefAdapter( ConfigAdapterContext context) { @@ -43,7 +39,7 @@ public NamspaceRefAdapter( @Override public JsonObject adaptToJson( - NamespaceRef ref) + NamespaceRefConfig ref) { JsonObjectBuilder object = Json.createObjectBuilder(); @@ -60,23 +56,21 @@ public JsonObject adaptToJson( } @Override - public NamespaceRef adaptFromJson( + public NamespaceRefConfig adaptFromJson( JsonObject object) { - String name = object.getString(NAME_NAME); - Map links = object.containsKey(LINKS_NAME) - ? 
object.getJsonObject(LINKS_NAME) - .entrySet() - .stream() - .collect(toMap(Map.Entry::getKey, e -> asJsonString(e.getValue()))) - : LINKS_DEFAULT; + NamespaceRefConfigBuilder namespace = NamespaceRefConfig.builder(); - return new NamespaceRef(name, links); - } + namespace.name(object.getString(NAME_NAME)); - private static String asJsonString( - JsonValue value) - { - return ((JsonString) value).getString(); + if (object.containsKey(LINKS_NAME)) + { + object.getJsonObject(LINKS_NAME) + .entrySet() + .stream() + .forEach(e -> namespace.link(e.getKey(), JsonString.class.cast(e.getValue()).getString())); + } + + return namespace.build(); } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/RouteAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/RouteAdapter.java index c14ca66dc6..bfa617fa44 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/RouteAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/RouteAdapter.java @@ -15,12 +15,8 @@ */ package io.aklivity.zilla.runtime.engine.internal.config; -import static io.aklivity.zilla.runtime.engine.config.RouteConfig.GUARDED_DEFAULT; -import static io.aklivity.zilla.runtime.engine.config.RouteConfig.WHEN_DEFAULT; - -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; +import static io.aklivity.zilla.runtime.engine.config.RouteConfigBuilder.GUARDED_DEFAULT; +import static io.aklivity.zilla.runtime.engine.config.RouteConfigBuilder.WHEN_DEFAULT; import jakarta.json.Json; import jakarta.json.JsonArrayBuilder; @@ -30,11 +26,11 @@ import jakarta.json.JsonValue; import jakarta.json.bind.adapter.JsonbAdapter; -import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; import io.aklivity.zilla.runtime.engine.config.GuardedConfig; +import 
io.aklivity.zilla.runtime.engine.config.GuardedConfigBuilder; import io.aklivity.zilla.runtime.engine.config.RouteConfig; -import io.aklivity.zilla.runtime.engine.config.WithConfig; +import io.aklivity.zilla.runtime.engine.config.RouteConfigBuilder; public class RouteAdapter implements JsonbAdapter { @@ -113,37 +109,46 @@ public JsonObject adaptToJson( public RouteConfig adaptFromJson( JsonObject object) { - String newExit = object.containsKey(EXIT_NAME) - ? object.getString(EXIT_NAME) - : null; - List newWhen = object.containsKey(WHEN_NAME) - ? object.getJsonArray(WHEN_NAME) - .stream().map(JsonValue::asJsonObject) - .map(condition::adaptFromJson) - .collect(Collectors.toList()) - : WHEN_DEFAULT; - WithConfig newWith = object.containsKey(WITH_NAME) - ? with.adaptFromJson(object.getJsonObject(WITH_NAME)) - : null; - - List newGuarded = GUARDED_DEFAULT; - if (object.containsKey(GUARDED_NAME)) + RouteConfigBuilder route = RouteConfig.builder() + .order(index); + + if (object.containsKey(EXIT_NAME)) { - newGuarded = new ArrayList<>(); + route.exit(object.getString(EXIT_NAME)); + } + + if (object.containsKey(WHEN_NAME)) + { + object.getJsonArray(WHEN_NAME) + .stream() + .map(JsonValue::asJsonObject) + .map(condition::adaptFromJson) + .forEach(route::when); + } + if (object.containsKey(WITH_NAME)) + { + route.with(with.adaptFromJson(object.getJsonObject(WITH_NAME))); + } + + if (object.containsKey(GUARDED_NAME)) + { JsonObject guarded = object.getJsonObject(GUARDED_NAME); for (String name : guarded.keySet()) { - List roles = guarded.getJsonArray(name) + GuardedConfigBuilder guardedBy = route.guarded() + .name(name); + + guarded.getJsonArray(name) .stream() .map(JsonString.class::cast) .map(JsonString::getString) - .collect(Collectors.toList()); + .forEach(guardedBy::role); - newGuarded.add(new GuardedConfig(name, roles)); + guardedBy.build(); } } - return new RouteConfig(index, newExit, newWhen, newWith, newGuarded); + return route.build(); } } diff --git 
a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryAdapter.java index 2374b6493c..eb0f448b71 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryAdapter.java @@ -16,9 +16,7 @@ package io.aklivity.zilla.runtime.engine.internal.config; import java.util.Arrays; -import java.util.List; import java.util.Map; -import java.util.stream.Collectors; import jakarta.json.Json; import jakarta.json.JsonArrayBuilder; @@ -30,8 +28,8 @@ import io.aklivity.zilla.runtime.engine.config.AttributeConfig; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; import io.aklivity.zilla.runtime.engine.config.ExporterConfig; -import io.aklivity.zilla.runtime.engine.config.MetricConfig; import io.aklivity.zilla.runtime.engine.config.TelemetryConfig; +import io.aklivity.zilla.runtime.engine.config.TelemetryConfigBuilder; public class TelemetryAdapter implements JsonbAdapter { @@ -77,23 +75,30 @@ public JsonObject adaptToJson( @Override public TelemetryConfig adaptFromJson( - JsonObject jsonObject) + JsonObject object) { - List attributes = jsonObject.containsKey(ATTRIBUTES_NAME) - ? jsonObject.getJsonObject(ATTRIBUTES_NAME).entrySet().stream() - .map(attribute::adaptFromJson) - .collect(Collectors.toList()) - : List.of(); - List metrics = jsonObject.containsKey(METRICS_NAME) - ? jsonObject.getJsonArray(METRICS_NAME).stream() - .map(metric::adaptFromJson) - .collect(Collectors.toList()) - : List.of(); - List exporters = jsonObject.containsKey(EXPORTERS_NAME) - ? 
Arrays.stream(exporter.adaptFromJson(jsonObject.getJsonObject(EXPORTERS_NAME))) - .collect(Collectors.toList()) - : List.of(); - - return new TelemetryConfig(attributes, metrics, exporters); + TelemetryConfigBuilder telemetry = TelemetryConfig.builder(); + + if (object.containsKey(ATTRIBUTES_NAME)) + { + object.getJsonObject(ATTRIBUTES_NAME).entrySet().stream() + .map(attribute::adaptFromJson) + .forEach(telemetry::attribute); + } + + if (object.containsKey(METRICS_NAME)) + { + object.getJsonArray(METRICS_NAME).stream() + .map(metric::adaptFromJson) + .forEach(telemetry::metric); + } + + if (object.containsKey(EXPORTERS_NAME)) + { + Arrays.stream(exporter.adaptFromJson(object.getJsonObject(EXPORTERS_NAME))) + .forEach(telemetry::exporter); + } + + return telemetry.build(); } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryRefAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryRefAdapter.java index d99e230712..d7b0dd4095 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryRefAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryRefAdapter.java @@ -15,17 +15,14 @@ */ package io.aklivity.zilla.runtime.engine.internal.config; -import java.util.List; -import java.util.stream.Collectors; - import jakarta.json.Json; import jakarta.json.JsonArrayBuilder; import jakarta.json.JsonObject; import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; -import io.aklivity.zilla.runtime.engine.config.MetricRefConfig; import io.aklivity.zilla.runtime.engine.config.TelemetryRefConfig; +import io.aklivity.zilla.runtime.engine.config.TelemetryRefConfigBuilder; public class TelemetryRefAdapter implements JsonbAdapter { @@ -51,13 +48,17 @@ public JsonObject adaptToJson( @Override public TelemetryRefConfig adaptFromJson( - JsonObject jsonObject) + JsonObject 
object) { - List metricRefs = jsonObject.containsKey(METRICS_NAME) - ? jsonObject.getJsonArray(METRICS_NAME).stream() - .map(metricRef::adaptFromJson) - .collect(Collectors.toList()) - : List.of(); - return new TelemetryRefConfig(metricRefs); + TelemetryRefConfigBuilder telemetry = TelemetryRefConfig.builder(); + + if (object.containsKey(METRICS_NAME)) + { + object.getJsonArray(METRICS_NAME).stream() + .map(metricRef::adaptFromJson) + .forEach(telemetry::metric); + } + + return telemetry.build(); } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/VaultAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/VaultAdapter.java index 3045263791..0bc03d7114 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/VaultAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/VaultAdapter.java @@ -20,9 +20,9 @@ import jakarta.json.JsonObjectBuilder; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; import io.aklivity.zilla.runtime.engine.config.VaultConfig; +import io.aklivity.zilla.runtime.engine.config.VaultConfigBuilder; public class VaultAdapter { @@ -59,13 +59,17 @@ public VaultConfig adaptFromJson( JsonObject object) { String type = object.getString(TYPE_NAME); - options.adaptType(type); - OptionsConfig opts = object.containsKey(OPTIONS_NAME) ? 
- options.adaptFromJson(object.getJsonObject(OPTIONS_NAME)) : - null; + VaultConfigBuilder vault = VaultConfig.builder() + .name(name) + .type(type); + + if (object.containsKey(OPTIONS_NAME)) + { + vault.options(options.adaptFromJson(object.getJsonObject(OPTIONS_NAME))); + } - return new VaultConfig(name, type, opts); + return vault.build(); } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java index 30abafa078..76136e3b6d 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java @@ -15,7 +15,7 @@ */ package io.aklivity.zilla.runtime.engine.config; -import static java.util.Collections.emptyList; +import static io.aklivity.zilla.runtime.engine.config.KindConfig.SERVER; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -29,6 +29,9 @@ import org.mockito.junit.MockitoRule; import org.mockito.quality.Strictness; +import io.aklivity.zilla.runtime.engine.internal.config.ConditionConfigAdapterTest.TestConditionConfig; +import io.aklivity.zilla.runtime.engine.test.internal.binding.config.TestBindingOptionsConfig; + public class ConfigWriterTest { @Rule @@ -48,11 +51,41 @@ public void initYaml() @Test public void shouldWriteNamespace() { - NamespaceConfig config = new NamespaceConfig("test", emptyList(), null, emptyList(), emptyList(), emptyList()); + NamespaceConfig config = NamespaceConfig.builder() + .name("test") + .binding() + .name("test0") + .type("test") + .kind(SERVER) + .options(TestBindingOptionsConfig::builder) + .mode("test") + .build() + .route() + .when(TestConditionConfig::builder) + .match("test") + .build() + .exit("exit0") + .build() + .build() + .build(); String text = 
yaml.write(config); assertThat(text, not(nullValue())); - assertThat(text, equalTo("---\nname: \"test\"\n")); + assertThat(text, equalTo(String.join("\n", + new String[] { + "name: test", + "bindings:", + " test0:", + " type: test", + " kind: server", + " options:", + " mode: test", + " routes:", + " - exit: exit0", + " when:", + " - match: test", + "" + }))); } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java index e416967e6a..edc10a071e 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java @@ -17,8 +17,6 @@ import static io.aklivity.zilla.runtime.engine.config.KindConfig.REMOTE_SERVER; import static io.aklivity.zilla.runtime.engine.config.KindConfig.SERVER; -import static java.util.Collections.emptyList; -import static java.util.Collections.singletonList; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyCollectionOf; @@ -28,8 +26,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -import java.util.List; - import jakarta.json.bind.Jsonb; import jakarta.json.bind.JsonbBuilder; import jakarta.json.bind.JsonbConfig; @@ -44,9 +40,7 @@ import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; -import io.aklivity.zilla.runtime.engine.config.MetricRefConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; -import io.aklivity.zilla.runtime.engine.config.TelemetryRefConfig; import io.aklivity.zilla.runtime.engine.test.internal.binding.config.TestBindingOptionsConfig; public class 
BindingConfigsAdapterTest @@ -90,7 +84,14 @@ public void shouldReadBinding() @Test public void shouldWriteBinding() { - BindingConfig[] bindings = { new BindingConfig(null, "test", "test", SERVER, null, null, emptyList(), null) }; + BindingConfig[] bindings = + { + BindingConfig.builder() + .name("test") + .type("test") + .kind(SERVER) + .build() + }; String text = jsonb.toJson(bindings); @@ -126,7 +127,15 @@ public void shouldReadBindingWithVault() @Test public void shouldWriteBindingWithVault() { - BindingConfig[] bindings = { new BindingConfig("test", "test", "test", SERVER, null, null, emptyList(), null) }; + BindingConfig[] bindings = + { + BindingConfig.builder() + .vault("test") + .name("test") + .type("test") + .kind(SERVER) + .build() + }; String text = jsonb.toJson(bindings); @@ -163,7 +172,16 @@ public void shouldReadBindingWithOptions() public void shouldWriteBindingWithOptions() { BindingConfig[] bindings = - { new BindingConfig(null, "test", "test", SERVER, null, new TestBindingOptionsConfig("test"), emptyList(), null) }; + { + BindingConfig.builder() + .name("test") + .type("test") + .kind(SERVER) + .options(TestBindingOptionsConfig::builder) + .mode("test") + .build() + .build() + }; String text = jsonb.toJson(bindings); @@ -199,16 +217,50 @@ public void shouldReadBindingWithRoute() assertThat(bindings[0].routes.get(0).when, empty()); } + @Test + public void shouldWriteBindingWithExit() + { + BindingConfig[] bindings = + { + BindingConfig.builder() + .name("test") + .type("test") + .kind(SERVER) + .route() + .exit("test") + .build() + .build() + }; + + String text = jsonb.toJson(bindings); + + assertThat(text, not(nullValue())); + assertThat(text, equalTo("{\"test\":{\"type\":\"test\",\"kind\":\"server\",\"exit\":\"test\"}}")); + } @Test public void shouldWriteBindingWithRoute() { BindingConfig[] bindings = - { new BindingConfig(null, "test", "test", SERVER, null, null, singletonList(new RouteConfig("test")), null) }; + { + BindingConfig.builder() + 
.name("test") + .type("test") + .kind(SERVER) + .route() + .exit("test") + .guarded() + .name("test0") + .role("read") + .build() + .build() + .build() + }; String text = jsonb.toJson(bindings); assertThat(text, not(nullValue())); - assertThat(text, equalTo("{\"test\":{\"type\":\"test\",\"kind\":\"server\",\"routes\":[{\"exit\":\"test\"}]}}")); + assertThat(text, equalTo("{\"test\":{\"type\":\"test\",\"kind\":\"server\"," + + "\"routes\":[{\"exit\":\"test\",\"guarded\":{\"test0\":[\"read\"]}}]}}")); } @Test @@ -272,9 +324,19 @@ public void shouldReadBindingWithRemoteServerKind() @Test public void shouldWriteBindingWithTelemetry() { - TelemetryRefConfig telemetry = new TelemetryRefConfig(List.of(new MetricRefConfig("test.counter"))); BindingConfig[] bindings = - { new BindingConfig(null, "test", "test", SERVER, null, null, List.of(), telemetry) }; + { + BindingConfig.builder() + .name("test") + .type("test") + .kind(SERVER) + .telemetry() + .metric() + .name("test.counter") + .build() + .build() + .build() + }; String text = jsonb.toJson(bindings); @@ -287,14 +349,22 @@ public void shouldWriteBindingWithTelemetry() public void shouldWriteBindingWithRemoteServerKind() { BindingConfig[] bindings = - { new BindingConfig(null, "test", "test", REMOTE_SERVER, "test_entry", - null, singletonList(new RouteConfig("test")), null) }; + { + BindingConfig.builder() + .name("test") + .type("test") + .kind(REMOTE_SERVER) + .entry("test_entry") + .route() + .exit("test") + .build() + .build() + }; String text = jsonb.toJson(bindings); assertThat(text, not(nullValue())); assertThat(text, equalTo("{\"test\":{\"type\":\"test\",\"kind\":\"remote_server\"," + - "\"entry\":\"test_entry\",\"routes\":[{\"exit\":\"test\"}]}}")); - + "\"entry\":\"test_entry\",\"exit\":\"test\"}}")); } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/ConditionConfigAdapterTest.java 
b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/ConditionConfigAdapterTest.java index 39b9db3b17..ef925a61ac 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/ConditionConfigAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/ConditionConfigAdapterTest.java @@ -20,6 +20,8 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; +import java.util.function.Function; + import jakarta.json.Json; import jakarta.json.JsonObject; import jakarta.json.JsonObjectBuilder; @@ -32,6 +34,7 @@ import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; public class ConditionConfigAdapterTest { @@ -103,13 +106,50 @@ public static final class TestConditionConfig extends ConditionConfig { public final String match; - public TestConditionConfig( + public static TestConditionConfigBuilder builder() + { + return new TestConditionConfigBuilder<>(TestConditionConfig.class::cast); + } + + public static TestConditionConfigBuilder builder( + Function mapper) + { + return new TestConditionConfigBuilder<>(mapper); + } + + TestConditionConfig( String match) { this.match = match; } } + public static final class TestConditionConfigBuilder implements ConfigBuilder + { + private final Function mapper; + + private String match; + + TestConditionConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public TestConditionConfigBuilder match( + String match) + { + this.match = match; + return this; + } + + @Override + public T build() + { + return mapper.apply(new TestConditionConfig(match)); + } + } + public static final class TestConditionConfigAdapter implements ConditionConfigAdapterSpi { private static final String MATCH_NAME = "match"; diff --git 
a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceConfigAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceConfigAdapterTest.java index 989d3f225e..cd96cc9547 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceConfigAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceConfigAdapterTest.java @@ -16,9 +16,7 @@ package io.aklivity.zilla.runtime.engine.internal.config; import static io.aklivity.zilla.runtime.engine.config.KindConfig.SERVER; -import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonList; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.emptyCollectionOf; import static org.hamcrest.Matchers.equalTo; @@ -26,7 +24,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -import java.util.List; +import java.time.Duration; import jakarta.json.bind.Jsonb; import jakarta.json.bind.JsonbBuilder; @@ -40,16 +38,14 @@ import org.mockito.junit.MockitoRule; import org.mockito.quality.Strictness; -import io.aklivity.zilla.runtime.engine.config.AttributeConfig; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; -import io.aklivity.zilla.runtime.engine.config.ExporterConfig; -import io.aklivity.zilla.runtime.engine.config.GuardConfig; -import io.aklivity.zilla.runtime.engine.config.MetricConfig; import io.aklivity.zilla.runtime.engine.config.NamespaceConfig; -import io.aklivity.zilla.runtime.engine.config.TelemetryConfig; +import io.aklivity.zilla.runtime.engine.config.NamespaceRefConfig; import io.aklivity.zilla.runtime.engine.config.VaultConfig; import 
io.aklivity.zilla.runtime.engine.test.internal.exporter.config.TestExporterOptionsConfig; +import io.aklivity.zilla.runtime.engine.test.internal.guard.config.TestGuardOptionsConfig; +import io.aklivity.zilla.runtime.engine.test.internal.vault.config.TestVaultOptionsConfig; public class NamespaceConfigAdapterTest { @@ -93,13 +89,15 @@ public void shouldReadNamespace() assertThat(config.name, equalTo("test")); assertThat(config.bindings, emptyCollectionOf(BindingConfig.class)); assertThat(config.vaults, emptyCollectionOf(VaultConfig.class)); - assertThat(config.references, emptyCollectionOf(NamespaceRef.class)); + assertThat(config.references, emptyCollectionOf(NamespaceRefConfig.class)); } @Test public void shouldWriteNamespace() { - NamespaceConfig config = new NamespaceConfig("test", emptyList(), null, emptyList(), emptyList(), emptyList()); + NamespaceConfig config = NamespaceConfig.builder() + .name("test") + .build(); String text = jsonb.toJson(config); @@ -135,17 +133,22 @@ public void shouldReadNamespaceWithBinding() assertThat(config.bindings.get(0).type, equalTo("test")); assertThat(config.bindings.get(0).kind, equalTo(SERVER)); assertThat(config.vaults, emptyCollectionOf(VaultConfig.class)); - assertThat(config.references, emptyCollectionOf(NamespaceRef.class)); + assertThat(config.references, emptyCollectionOf(NamespaceRefConfig.class)); } @Test public void shouldWriteNamespaceWithBinding() { - BindingConfig binding = new BindingConfig(null, "test", "test", SERVER, null, null, emptyList(), null); - NamespaceConfig namespace = new NamespaceConfig("test", emptyList(), null, - singletonList(binding), emptyList(), emptyList()); + NamespaceConfig config = NamespaceConfig.builder() + .name("test") + .binding() + .name("test") + .type("test") + .kind(SERVER) + .build() + .build(); - String text = jsonb.toJson(namespace); + String text = jsonb.toJson(config); assertThat(text, not(nullValue())); assertThat(text, 
equalTo("{\"name\":\"test\",\"bindings\":{\"test\":{\"type\":\"test\",\"kind\":\"server\"}}}")); @@ -181,13 +184,23 @@ public void shouldReadNamespaceWithGuard() @Test public void shouldWriteNamespaceWithGuard() { - GuardConfig guard = new GuardConfig("default", "test", null); - NamespaceConfig config = new NamespaceConfig("test", emptyList(), null, emptyList(), singletonList(guard), emptyList()); + NamespaceConfig config = NamespaceConfig.builder() + .name("test") + .guard() + .name("default") + .type("test") + .options(TestGuardOptionsConfig::builder) + .credentials("token") + .lifetime(Duration.ofSeconds(10)) + .build() + .build() + .build(); String text = jsonb.toJson(config); assertThat(text, not(nullValue())); - assertThat(text, equalTo("{\"name\":\"test\",\"guards\":{\"default\":{\"type\":\"test\"}}}")); + assertThat(text, equalTo("{\"name\":\"test\",\"guards\":{\"default\":{\"type\":\"test\"," + + "\"options\":{\"credentials\":\"token\",\"lifetime\":\"PT10S\"}}}}")); } @Test @@ -220,13 +233,22 @@ public void shouldReadNamespaceWithVault() @Test public void shouldWriteNamespaceWithVault() { - VaultConfig vault = new VaultConfig("default", "test", null); - NamespaceConfig config = new NamespaceConfig("test", emptyList(), null, emptyList(), emptyList(), singletonList(vault)); + NamespaceConfig config = NamespaceConfig.builder() + .name("test") + .vault() + .name("default") + .type("test") + .options(TestVaultOptionsConfig::builder) + .mode("test") + .build() + .build() + .build(); String text = jsonb.toJson(config); assertThat(text, not(nullValue())); - assertThat(text, equalTo("{\"name\":\"test\",\"vaults\":{\"default\":{\"type\":\"test\"}}}")); + assertThat(text, equalTo("{\"name\":\"test\",\"vaults\":{\"default\":{\"type\":\"test\"," + + "\"options\":{\"mode\":\"test\"}}}}")); } @Test @@ -258,12 +280,26 @@ public void shouldReadNamespaceWithTelemetry() @Test public void shouldWriteNamespaceWithTelemetry() { - TelemetryConfig telemetry = new TelemetryConfig( 
- List.of(new AttributeConfig("test.attribute", "example")), - List.of(new MetricConfig("test", "test.counter")), - List.of(new ExporterConfig("test0", "test", new TestExporterOptionsConfig("test42"))) - ); - NamespaceConfig config = new NamespaceConfig("test", emptyList(), telemetry, emptyList(), emptyList(), emptyList()); + NamespaceConfig config = NamespaceConfig.builder() + .name("test") + .telemetry() + .attribute() + .name("test.attribute") + .value("example") + .build() + .metric() + .group("test") + .name("test.counter") + .build() + .exporter() + .name("test0") + .type("test") + .options(TestExporterOptionsConfig::builder) + .mode("test42") + .build() + .build() + .build() + .build(); String text = jsonb.toJson(config); @@ -303,9 +339,12 @@ public void shouldReadNamespaceWithReference() @Test public void shouldWriteNamespaceWithReference() { - NamespaceRef reference = new NamespaceRef("test", emptyMap()); - NamespaceConfig config = new NamespaceConfig("test", singletonList(reference), null, - emptyList(), emptyList(), emptyList()); + NamespaceConfig config = NamespaceConfig.builder() + .name("test") + .namespace() + .name("test") + .build() + .build(); String text = jsonb.toJson(config); diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/ReferenceConfigAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceRefConfigAdapterTest.java similarity index 81% rename from runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/ReferenceConfigAdapterTest.java rename to runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceRefConfigAdapterTest.java index 01fa8db581..b47e8d6c0c 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/ReferenceConfigAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceRefConfigAdapterTest.java @@ 
-35,11 +35,13 @@ import org.mockito.quality.Strictness; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; +import io.aklivity.zilla.runtime.engine.config.NamespaceRefConfig; -public class ReferenceConfigAdapterTest +public class NamespaceRefConfigAdapterTest { @Rule public MockitoRule rule = MockitoJUnit.rule().strictness(Strictness.STRICT_STUBS); + @Mock private ConfigAdapterContext context; private Jsonb jsonb; @@ -60,7 +62,7 @@ public void shouldReadReference() "\"name\": \"test\"" + "}"; - NamespaceRef ref = jsonb.fromJson(text, NamespaceRef.class); + NamespaceRefConfig ref = jsonb.fromJson(text, NamespaceRefConfig.class); assertThat(ref, not(nullValue())); assertThat(ref.name, equalTo("test")); @@ -71,9 +73,11 @@ public void shouldReadReference() @Test public void shouldWriteReference() { - NamespaceRef route = new NamespaceRef("test", emptyMap()); + NamespaceRefConfig reference = NamespaceRefConfig.builder() + .name("test") + .build(); - String text = jsonb.toJson(route); + String text = jsonb.toJson(reference); assertThat(text, not(nullValue())); assertThat(text, equalTo("{\"name\":\"test\"}")); @@ -91,7 +95,7 @@ public void shouldReadReferenceWithLink() "}" + "}"; - NamespaceRef ref = jsonb.fromJson(text, NamespaceRef.class); + NamespaceRefConfig ref = jsonb.fromJson(text, NamespaceRefConfig.class); assertThat(ref, not(nullValue())); assertThat(ref.name, equalTo("test")); @@ -102,9 +106,12 @@ public void shouldReadReferenceWithLink() @Test public void shouldWriteReferenceWithLink() { - NamespaceRef route = new NamespaceRef("test", singletonMap("self", "/test")); + NamespaceRefConfig reference = NamespaceRefConfig.builder() + .name("test") + .link("self", "/test") + .build(); - String text = jsonb.toJson(route); + String text = jsonb.toJson(reference); assertThat(text, not(nullValue())); assertThat(text, equalTo("{\"name\":\"test\",\"links\":{\"self\":\"/test\"}}")); diff --git 
a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/OptionsConfigAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/OptionsConfigAdapterTest.java index b7d3dc080e..f7f2afaa45 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/OptionsConfigAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/OptionsConfigAdapterTest.java @@ -75,7 +75,9 @@ public void shouldReadOptions() @Test public void shouldWriteOptions() { - OptionsConfig options = new TestBindingOptionsConfig("test"); + OptionsConfig options = TestBindingOptionsConfig.builder() + .mode("test") + .build(); String text = jsonb.toJson(options); @@ -100,7 +102,9 @@ public void shouldReadNullWhenNotAdapting() @Test public void shouldWriteNullWhenNotAdapting() { - OptionsConfig options = new TestBindingOptionsConfig("test"); + OptionsConfig options = TestBindingOptionsConfig.builder() + .mode("test") + .build(); adapter.adaptType(null); String text = jsonb.toJson(options); diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/RouteConfigAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/RouteConfigAdapterTest.java index bda6b90f4c..a858d54819 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/RouteConfigAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/RouteConfigAdapterTest.java @@ -15,7 +15,6 @@ */ package io.aklivity.zilla.runtime.engine.internal.config; -import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.contains; @@ -38,7 +37,6 @@ import org.mockito.quality.Strictness; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; -import 
io.aklivity.zilla.runtime.engine.config.GuardedConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; import io.aklivity.zilla.runtime.engine.internal.config.ConditionConfigAdapterTest.TestConditionConfig; @@ -77,7 +75,9 @@ public void shouldReadRoute() @Test public void shouldWriteRoute() { - RouteConfig route = new RouteConfig("test"); + RouteConfig route = RouteConfig.builder() + .exit("test") + .build(); String text = jsonb.toJson(route); @@ -109,7 +109,13 @@ public void shouldReadRouteGuarded() @Test public void shouldWriteRouteGuarded() { - RouteConfig route = new RouteConfig("test", singletonList(new GuardedConfig("test", singletonList("role")))); + RouteConfig route = RouteConfig.builder() + .exit("test") + .guarded() + .name("test") + .role("role") + .build() + .build(); String text = jsonb.toJson(route); @@ -140,7 +146,12 @@ public void shouldReadRouteWhenMatch() @Test public void shouldWriteRouteWhenMatch() { - RouteConfig route = new RouteConfig("test", singletonList(new TestConditionConfig("test")), emptyList()); + RouteConfig route = RouteConfig.builder() + .exit("test") + .when(TestConditionConfig::builder) + .match("test") + .build() + .build(); String text = jsonb.toJson(route); diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryConfigsAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryConfigsAdapterTest.java index 66f2ee160b..6af2852127 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryConfigsAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryConfigsAdapterTest.java @@ -21,8 +21,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -import java.util.List; - import jakarta.json.bind.Jsonb; import jakarta.json.bind.JsonbBuilder; import jakarta.json.bind.JsonbConfig; @@ -35,10 +33,7 @@ 
import org.mockito.junit.MockitoRule; import org.mockito.quality.Strictness; -import io.aklivity.zilla.runtime.engine.config.AttributeConfig; import io.aklivity.zilla.runtime.engine.config.ConfigAdapterContext; -import io.aklivity.zilla.runtime.engine.config.ExporterConfig; -import io.aklivity.zilla.runtime.engine.config.MetricConfig; import io.aklivity.zilla.runtime.engine.config.TelemetryConfig; import io.aklivity.zilla.runtime.engine.test.internal.exporter.config.TestExporterOptionsConfig; @@ -105,11 +100,20 @@ public void shouldReadTelemetry() public void shouldWriteTelemetry() { // GIVEN - TelemetryConfig telemetry = new TelemetryConfig( - List.of(new AttributeConfig("test.attribute", "example")), - List.of(new MetricConfig("test", "test.counter")), - List.of(new ExporterConfig("test0", "test", null)) - ); + TelemetryConfig telemetry = TelemetryConfig.builder() + .attribute() + .name("test.attribute") + .value("example") + .build() + .metric() + .group("test") + .name("test.counter") + .build() + .exporter() + .name("test0") + .type("test") + .build() + .build(); // WHEN String text = jsonb.toJson(telemetry); @@ -171,11 +175,23 @@ public void shouldReadTelemetryWithExporterOptions() public void shouldWriteTelemetryWithExporterOptions() { // GIVEN - TelemetryConfig telemetry = new TelemetryConfig( - List.of(new AttributeConfig("test.attribute", "example")), - List.of(new MetricConfig("test", "test.counter")), - List.of(new ExporterConfig("test0", "test", new TestExporterOptionsConfig("test42"))) - ); + TelemetryConfig telemetry = TelemetryConfig.builder() + .attribute() + .name("test.attribute") + .value("example") + .build() + .metric() + .group("test") + .name("test.counter") + .build() + .exporter() + .name("test0") + .type("test") + .options(TestExporterOptionsConfig::builder) + .mode("test42") + .build() + .build() + .build(); // WHEN String text = jsonb.toJson(telemetry); diff --git 
a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfig.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfig.java index 9e285e8be2..b40a76ec60 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfig.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfig.java @@ -15,13 +15,26 @@ */ package io.aklivity.zilla.runtime.engine.test.internal.binding.config; +import java.util.function.Function; + import io.aklivity.zilla.runtime.engine.config.OptionsConfig; public final class TestBindingOptionsConfig extends OptionsConfig { public final String mode; - public TestBindingOptionsConfig( + public static TestBindingOptionsConfigBuilder builder() + { + return new TestBindingOptionsConfigBuilder<>(TestBindingOptionsConfig.class::cast); + } + + public static TestBindingOptionsConfigBuilder builder( + Function mapper) + { + return new TestBindingOptionsConfigBuilder<>(mapper); + } + + TestBindingOptionsConfig( String mode) { this.mode = mode; diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfigAdapter.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfigAdapter.java index 26f6097f5e..e4b2b09630 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfigAdapter.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfigAdapter.java @@ -55,10 +55,16 @@ public JsonObject adaptToJson( public OptionsConfig adaptFromJson( JsonObject object) { - String mode = object.containsKey(MODE_NAME) - ? 
object.getString(MODE_NAME) - : null; + TestBindingOptionsConfigBuilder testOptions = TestBindingOptionsConfig.builder(); - return new TestBindingOptionsConfig(mode); + if (object != null) + { + if (object.containsKey(MODE_NAME)) + { + testOptions.mode(object.getString(MODE_NAME)); + } + } + + return testOptions.build(); } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfigBuilder.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfigBuilder.java new file mode 100644 index 0000000000..e85c4d8601 --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfigBuilder.java @@ -0,0 +1,47 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.test.internal.binding.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; + +public final class TestBindingOptionsConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String mode; + + TestBindingOptionsConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public TestBindingOptionsConfigBuilder mode( + String mode) + { + this.mode = mode; + return this; + } + + @Override + public T build() + { + return mapper.apply(new TestBindingOptionsConfig(mode)); + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfig.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfig.java index 1bb9438eee..989b155ed7 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfig.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfig.java @@ -15,13 +15,26 @@ */ package io.aklivity.zilla.runtime.engine.test.internal.exporter.config; +import java.util.function.Function; + import io.aklivity.zilla.runtime.engine.config.OptionsConfig; public final class TestExporterOptionsConfig extends OptionsConfig { public final String mode; - public TestExporterOptionsConfig( + public static TestExporterOptionsConfigBuilder builder() + { + return new TestExporterOptionsConfigBuilder<>(TestExporterOptionsConfig.class::cast); + } + + public static TestExporterOptionsConfigBuilder builder( + Function mapper) + { + return new TestExporterOptionsConfigBuilder<>(mapper); + } + + TestExporterOptionsConfig( String mode) { this.mode = mode; diff --git 
a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigAdapter.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigAdapter.java index caf1cfe4b3..205e21972f 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigAdapter.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigAdapter.java @@ -55,9 +55,16 @@ public JsonObject adaptToJson( public OptionsConfig adaptFromJson( JsonObject object) { - String mode = object != null && object.containsKey(MODE_NAME) - ? object.getString(MODE_NAME) - : null; - return new TestExporterOptionsConfig(mode); + TestExporterOptionsConfigBuilder testOptions = TestExporterOptionsConfig.builder(); + + if (object != null) + { + if (object.containsKey(MODE_NAME)) + { + testOptions.mode(object.getString(MODE_NAME)); + } + } + + return testOptions.build(); } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigBuilder.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigBuilder.java new file mode 100644 index 0000000000..e77b486a6a --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigBuilder.java @@ -0,0 +1,47 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.test.internal.exporter.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; + +public final class TestExporterOptionsConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String mode; + + TestExporterOptionsConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public TestExporterOptionsConfigBuilder mode( + String mode) + { + this.mode = mode; + return this; + } + + @Override + public T build() + { + return mapper.apply(new TestExporterOptionsConfig(mode)); + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/TestGuardConfig.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/TestGuardConfig.java index e3e1acfb11..31c573b47d 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/TestGuardConfig.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/TestGuardConfig.java @@ -15,16 +15,11 @@ */ package io.aklivity.zilla.runtime.engine.test.internal.guard; -import java.time.Duration; - import io.aklivity.zilla.runtime.engine.config.GuardConfig; import io.aklivity.zilla.runtime.engine.test.internal.guard.config.TestGuardOptionsConfig; public final class TestGuardConfig { - public static final Duration DEFAULT_CHALLENGE_NEVER = Duration.ofMillis(0L); - public static final Duration 
DEFAULT_LIFETIME_FOREVER = Duration.ofMillis(Long.MAX_VALUE); - public final TestGuardOptionsConfig options; public TestGuardConfig( diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/TestGuardHandler.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/TestGuardHandler.java index b3bb14842a..1e872dfcdd 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/TestGuardHandler.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/TestGuardHandler.java @@ -15,8 +15,8 @@ */ package io.aklivity.zilla.runtime.engine.test.internal.guard; -import static io.aklivity.zilla.runtime.engine.test.internal.guard.TestGuardConfig.DEFAULT_CHALLENGE_NEVER; -import static io.aklivity.zilla.runtime.engine.test.internal.guard.TestGuardConfig.DEFAULT_LIFETIME_FOREVER; +import static io.aklivity.zilla.runtime.engine.test.internal.guard.config.TestGuardOptionsConfigBuilder.DEFAULT_CHALLENGE_NEVER; +import static io.aklivity.zilla.runtime.engine.test.internal.guard.config.TestGuardOptionsConfigBuilder.DEFAULT_LIFETIME_FOREVER; import java.time.Duration; import java.time.Instant; diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfig.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfig.java index 581f232355..69d6132e89 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfig.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfig.java @@ -18,6 +18,7 @@ import java.time.Duration; import java.util.List; import java.util.Objects; +import java.util.function.Function; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; @@ -28,7 +29,18 @@ public final class 
TestGuardOptionsConfig extends OptionsConfig public final Duration challenge; public final List roles; - public TestGuardOptionsConfig( + public static TestGuardOptionsConfigBuilder builder() + { + return new TestGuardOptionsConfigBuilder<>(TestGuardOptionsConfig.class::cast); + } + + public static TestGuardOptionsConfigBuilder builder( + Function mapper) + { + return new TestGuardOptionsConfigBuilder<>(mapper); + } + + TestGuardOptionsConfig( String credentials, Duration lifetime, Duration challenge, diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfigAdapter.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfigAdapter.java index c14d30b6ba..e713bfd13a 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfigAdapter.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfigAdapter.java @@ -15,15 +15,12 @@ */ package io.aklivity.zilla.runtime.engine.test.internal.guard.config; -import static io.aklivity.zilla.runtime.engine.test.internal.guard.TestGuardConfig.DEFAULT_CHALLENGE_NEVER; -import static io.aklivity.zilla.runtime.engine.test.internal.guard.TestGuardConfig.DEFAULT_LIFETIME_FOREVER; +import static io.aklivity.zilla.runtime.engine.test.internal.guard.config.TestGuardOptionsConfigBuilder.DEFAULT_CHALLENGE_NEVER; +import static io.aklivity.zilla.runtime.engine.test.internal.guard.config.TestGuardOptionsConfigBuilder.DEFAULT_LIFETIME_FOREVER; import java.time.Duration; -import java.util.ArrayList; -import java.util.List; import jakarta.json.Json; -import jakarta.json.JsonArray; import jakarta.json.JsonArrayBuilder; import jakarta.json.JsonObject; import jakarta.json.JsonObjectBuilder; @@ -87,32 +84,34 @@ public JsonObject adaptToJson( public OptionsConfig adaptFromJson( JsonObject object) { - 
String newCredentials = object.containsKey(CREDENTIALS_NAME) - ? object.getString(CREDENTIALS_NAME) - : null; + TestGuardOptionsConfigBuilder testOptions = TestGuardOptionsConfig.builder(); - Duration newLifetime = object.containsKey(LIFETIME_NAME) - ? Duration.parse(object.getString(LIFETIME_NAME)) - : DEFAULT_LIFETIME_FOREVER; - - Duration newChallenge = object.containsKey(CHALLENGE_NAME) - ? Duration.parse(object.getString(CHALLENGE_NAME)) - : DEFAULT_CHALLENGE_NEVER; - - JsonArray roles = object.containsKey(ROLES_NAME) - ? object.getJsonArray(ROLES_NAME) - : null; - - List newRoles = null; - - if (roles != null) + if (object != null) { - List newRoles0 = new ArrayList<>(); - roles.forEach(v -> - newRoles0.add(JsonString.class.cast(v).getString())); - newRoles = newRoles0; + if (object.containsKey(CREDENTIALS_NAME)) + { + testOptions.credentials(object.getString(CREDENTIALS_NAME)); + } + + if (object.containsKey(LIFETIME_NAME)) + { + testOptions.lifetime(Duration.parse(object.getString(LIFETIME_NAME))); + } + + if (object.containsKey(CHALLENGE_NAME)) + { + testOptions.challenge(Duration.parse(object.getString(CHALLENGE_NAME))); + } + + if (object.containsKey(ROLES_NAME)) + { + object.getJsonArray(ROLES_NAME).stream() + .map(JsonString.class::cast) + .map(JsonString::getString) + .forEach(testOptions::role); + } } - return new TestGuardOptionsConfig(newCredentials, newLifetime, newChallenge, newRoles); + return testOptions.build(); } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfigBuilder.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfigBuilder.java new file mode 100644 index 0000000000..a62617822e --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfigBuilder.java @@ -0,0 +1,88 @@ +/* + * Copyright 2021-2023 Aklivity Inc. 
+ * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.engine.test.internal.guard.config; + +import java.time.Duration; +import java.util.LinkedList; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; + +public final class TestGuardOptionsConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String credentials; + private Duration lifetime; + private Duration challenge; + private List roles; + + public static final Duration DEFAULT_CHALLENGE_NEVER = Duration.ofMillis(0L); + + public static final Duration DEFAULT_LIFETIME_FOREVER = Duration.ofMillis(Long.MAX_VALUE); + + TestGuardOptionsConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public TestGuardOptionsConfigBuilder credentials( + String credentials) + { + this.credentials = credentials; + return this; + } + + public TestGuardOptionsConfigBuilder lifetime( + Duration lifetime) + { + this.lifetime = lifetime; + return this; + } + + public TestGuardOptionsConfigBuilder challenge( + Duration challenge) + { + this.challenge = Objects.requireNonNull(challenge); + return this; + } + + public TestGuardOptionsConfigBuilder role( + String role) + { + if (roles == null) + { + roles = new LinkedList<>(); + } + 
roles.add(role); + return this; + } + + @Override + public T build() + { + return mapper.apply(new TestGuardOptionsConfig( + credentials, + Optional.ofNullable(lifetime).orElse(DEFAULT_LIFETIME_FOREVER), + Optional.ofNullable(challenge).orElse(DEFAULT_CHALLENGE_NEVER), + roles)); + } +} diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfig.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfig.java index 024fbef878..a763bb3b0c 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfig.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfig.java @@ -15,13 +15,26 @@ */ package io.aklivity.zilla.runtime.engine.test.internal.vault.config; +import java.util.function.Function; + import io.aklivity.zilla.runtime.engine.config.OptionsConfig; public final class TestVaultOptionsConfig extends OptionsConfig { public final String mode; - public TestVaultOptionsConfig( + public static TestVaultOptionsConfigBuilder builder() + { + return new TestVaultOptionsConfigBuilder<>(TestVaultOptionsConfig.class::cast); + } + + public static TestVaultOptionsConfigBuilder builder( + Function mapper) + { + return new TestVaultOptionsConfigBuilder<>(mapper); + } + + TestVaultOptionsConfig( String mode) { this.mode = mode; diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfigAdapter.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfigAdapter.java index e9397fa9db..46eac521ba 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfigAdapter.java +++ 
b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfigAdapter.java @@ -55,10 +55,16 @@ public JsonObject adaptToJson( public OptionsConfig adaptFromJson( JsonObject object) { - String mode = object.containsKey(MODE_NAME) - ? object.getString(MODE_NAME) - : null; + TestVaultOptionsConfigBuilder testOptions = TestVaultOptionsConfig.builder(); - return new TestVaultOptionsConfig(mode); + if (object != null) + { + if (object.containsKey(MODE_NAME)) + { + testOptions.mode(object.getString(MODE_NAME)); + } + } + + return testOptions.build(); } } diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfigBuilder.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfigBuilder.java new file mode 100644 index 0000000000..4007f52d01 --- /dev/null +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfigBuilder.java @@ -0,0 +1,47 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.engine.test.internal.vault.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; + +public final class TestVaultOptionsConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String mode; + + TestVaultOptionsConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public TestVaultOptionsConfigBuilder mode( + String mode) + { + this.mode = mode; + return this; + } + + @Override + public T build() + { + return mapper.apply(new TestVaultOptionsConfig(mode)); + } +} diff --git a/runtime/engine/src/test/resources/io/aklivity/zilla/runtime/engine/internal/EngineTest-duplicate-key.broken.json b/runtime/engine/src/test/resources/io/aklivity/zilla/runtime/engine/internal/EngineTest-duplicate-key.broken.json index f826157e37..1218659165 100644 --- a/runtime/engine/src/test/resources/io/aklivity/zilla/runtime/engine/internal/EngineTest-duplicate-key.broken.json +++ b/runtime/engine/src/test/resources/io/aklivity/zilla/runtime/engine/internal/EngineTest-duplicate-key.broken.json @@ -1,4 +1,5 @@ { + "name": "duplicate-key", "bindings": { "test0": diff --git a/runtime/exporter-prometheus/src/test/java/io/aklivity/zilla/runtime/exporter/prometheus/internal/PrometheusExporterHandlerTest.java b/runtime/exporter-prometheus/src/test/java/io/aklivity/zilla/runtime/exporter/prometheus/internal/PrometheusExporterHandlerTest.java index 4e7230f6ca..0068e0c0da 100644 --- a/runtime/exporter-prometheus/src/test/java/io/aklivity/zilla/runtime/exporter/prometheus/internal/PrometheusExporterHandlerTest.java +++ b/runtime/exporter-prometheus/src/test/java/io/aklivity/zilla/runtime/exporter/prometheus/internal/PrometheusExporterHandlerTest.java @@ -51,7 +51,11 @@ public void shouldStart() throws Exception EngineContext context = mock(EngineContext.class); PrometheusEndpointConfig endpoint = new 
PrometheusEndpointConfig("http", 4242, "/metrics"); PrometheusOptionsConfig options = new PrometheusOptionsConfig(new PrometheusEndpointConfig[]{endpoint}); - ExporterConfig exporter = new ExporterConfig("test0", "prometheus", options); + ExporterConfig exporter = ExporterConfig.builder() + .name("test0") + .type("prometheus") + .options(options) + .build(); PrometheusExporterConfig prometheusExporter = new PrometheusExporterConfig(exporter); Collector collector = mock(Collector.class); when(collector.counterIds()).thenReturn(new long[][]{}); diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfig.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfig.java index 4b71b9a2a9..36cc97c244 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfig.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfig.java @@ -14,6 +14,8 @@ */ package io.aklivity.zilla.runtime.guard.jwt.config; +import static java.util.function.Function.identity; + public class JwtKeyConfig { public final String alg; @@ -26,7 +28,12 @@ public class JwtKeyConfig public final String x; public final String y; - public JwtKeyConfig( + public static JwtKeyConfigBuilder builder() + { + return new JwtKeyConfigBuilder<>(identity()); + } + + JwtKeyConfig( String kty, String kid, String use, diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfigBuilder.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfigBuilder.java new file mode 100644 index 0000000000..f53515dab4 --- /dev/null +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfigBuilder.java @@ -0,0 +1,109 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in 
compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.guard.jwt.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public class JwtKeyConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String alg; + private String kty; + private String kid; + private String use; + private String n; + private String e; + private String crv; + private String x; + private String y; + + JwtKeyConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public JwtKeyConfigBuilder kty( + String kty) + { + this.kty = kty; + return this; + } + + public JwtKeyConfigBuilder kid( + String kid) + { + this.kid = kid; + return this; + } + + public JwtKeyConfigBuilder use( + String use) + { + this.use = use; + return this; + } + + public JwtKeyConfigBuilder n( + String n) + { + this.n = n; + return this; + } + + public JwtKeyConfigBuilder e( + String e) + { + this.e = e; + return this; + } + + public JwtKeyConfigBuilder alg( + String alg) + { + this.alg = alg; + return this; + } + + public JwtKeyConfigBuilder crv( + String crv) + { + this.crv = crv; + return this; + } + + public JwtKeyConfigBuilder x( + String x) + { + this.x = x; + return this; + } + + public JwtKeyConfigBuilder y( + String y) + { + this.y = y; + return this; + } + + @Override + public T build() + { + return mapper.apply(new JwtKeyConfig(kty, kid, use, n, e, alg, crv, x, y)); + } +} diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfig.java 
b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfig.java index 5e1481707c..2ac7787f91 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfig.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfig.java @@ -19,6 +19,7 @@ import java.time.Duration; import java.util.List; import java.util.Optional; +import java.util.function.Function; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; @@ -28,24 +29,25 @@ public class JwtOptionsConfig extends OptionsConfig public final String audience; public final List keys; public final Optional challenge; - public final Optional keysURL; - public JwtOptionsConfig( - String issuer, - String audience, - List keys, - Duration challenge) + public static JwtOptionsConfigBuilder builder() + { + return new JwtOptionsConfigBuilder<>(JwtOptionsConfig.class::cast); + } + + public static JwtOptionsConfigBuilder builder( + Function mapper) { - this(issuer, audience, keys, challenge, null); + return new JwtOptionsConfigBuilder<>(mapper); } - public JwtOptionsConfig( - String issuer, - String audience, - List keys, - Duration challenge, - String keysURL) + JwtOptionsConfig( + String issuer, + String audience, + List keys, + Duration challenge, + String keysURL) { this.issuer = issuer; this.audience = audience; diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfigBuilder.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfigBuilder.java new file mode 100644 index 0000000000..b2b2e6cf3d --- /dev/null +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfigBuilder.java @@ -0,0 +1,97 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.guard.jwt.config; + +import java.time.Duration; +import java.util.LinkedList; +import java.util.List; +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; + +public class JwtOptionsConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String issuer; + private String audience; + private List keys; + private Duration challenge; + private String keysURL; + + JwtOptionsConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public JwtOptionsConfigBuilder issuer( + String issuer) + { + this.issuer = issuer; + return this; + } + + public JwtOptionsConfigBuilder audience( + String audience) + { + this.audience = audience; + return this; + } + + public JwtOptionsConfigBuilder challenge( + Duration challenge) + { + this.challenge = challenge; + return this; + } + + public JwtOptionsConfigBuilder keys( + List keys) + { + this.keys = keys; + return this; + } + + public JwtKeyConfigBuilder> key() + { + return new JwtKeyConfigBuilder<>(this::key); + } + + public JwtOptionsConfigBuilder key( + JwtKeyConfig key) + { + if (keys == null) + { + keys = new LinkedList<>(); + } + keys.add(key); + return this; + } + + public JwtOptionsConfigBuilder keysURL( + String keysURL) + { + this.keysURL = keysURL; + return this; + } + + @Override + public T build() + { + return mapper.apply(new JwtOptionsConfig(issuer, audience, keys, challenge, keysURL)); + } +} diff --git 
a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeyConfigAdapter.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeyConfigAdapter.java index 2a61619941..46591af28a 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeyConfigAdapter.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtKeyConfigAdapter.java @@ -20,6 +20,7 @@ import jakarta.json.bind.adapter.JsonbAdapter; import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeyConfig; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeyConfigBuilder; public final class JwtKeyConfigAdapter implements JsonbAdapter { @@ -85,18 +86,45 @@ public JsonObject adaptToJson( public JwtKeyConfig adaptFromJson( JsonObject object) { - String kty = object.getString(KTY_NAME); - String kid = object.getString(KID_NAME); - String use = object.containsKey(USE_NAME) ? object.getString(USE_NAME) : null; + JwtKeyConfigBuilder jwtKey = JwtKeyConfig.builder() + .kty(object.getString(KTY_NAME)) + .kid(object.getString(KID_NAME)); - String n = object.containsKey(N_NAME) ? object.getString(N_NAME) : null; - String e = object.containsKey(E_NAME) ? object.getString(E_NAME) : null; - String alg = object.containsKey(ALG_NAME) ? object.getString(ALG_NAME) : null; + if (object.containsKey(USE_NAME)) + { + jwtKey.use(object.getString(USE_NAME)); + } + + if (object.containsKey(N_NAME)) + { + jwtKey.n(object.getString(N_NAME)); + } - String crv = object.containsKey(CRV_NAME) ? object.getString(CRV_NAME) : null; - String x = object.containsKey(X_NAME) ? object.getString(X_NAME) : null; - String y = object.containsKey(Y_NAME) ? 
object.getString(Y_NAME) : null; + if (object.containsKey(E_NAME)) + { + jwtKey.e(object.getString(E_NAME)); + } + + if (object.containsKey(ALG_NAME)) + { + jwtKey.alg(object.getString(ALG_NAME)); + } + + if (object.containsKey(CRV_NAME)) + { + jwtKey.crv(object.getString(CRV_NAME)); + } + + if (object.containsKey(X_NAME)) + { + jwtKey.x(object.getString(X_NAME)); + } + + if (object.containsKey(Y_NAME)) + { + jwtKey.y(object.getString(Y_NAME)); + } - return new JwtKeyConfig(kty, kid, use, n, e, alg, crv, x, y); + return jwtKey.build(); } } diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapter.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapter.java index 4c0077ccb1..8c6330c1b3 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapter.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapter.java @@ -15,12 +15,10 @@ package io.aklivity.zilla.runtime.guard.jwt.internal.config; import static java.util.Collections.emptyList; -import static java.util.stream.Collectors.toList; import java.time.Duration; import java.util.List; - import jakarta.json.Json; import jakarta.json.JsonArrayBuilder; import jakarta.json.JsonObject; @@ -33,6 +31,7 @@ import io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeyConfig; import io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfigBuilder; import io.aklivity.zilla.runtime.guard.jwt.internal.JwtGuard; public final class JwtOptionsConfigAdapter implements OptionsConfigAdapterSpi, JsonbAdapter @@ -100,47 +99,56 @@ public JsonObject adaptToJson( public OptionsConfig adaptFromJson( JsonObject object) { - String issuer = object.containsKey(ISSUER_NAME) 
- ? object.getString(ISSUER_NAME) - : null; + JwtOptionsConfigBuilder jwtOptions = JwtOptionsConfig.builder(); - String audience = object.containsKey(AUDIENCE_NAME) - ? object.getString(AUDIENCE_NAME) - : null; + String issuer = object.containsKey(ISSUER_NAME) ? object.getString(ISSUER_NAME) : null; + if (issuer != null) + { + jwtOptions.issuer(issuer); + } + + if (object.containsKey(AUDIENCE_NAME)) + { + jwtOptions.audience(object.getString(AUDIENCE_NAME)); + } - List keys = KEYS_DEFAULT; - String keysURL = null; if (object.containsKey(KEYS_NAME)) { JsonValue keysValue = object.getValue(String.format("/%s", KEYS_NAME)); switch (keysValue.getValueType()) { case ARRAY: - keys = keysValue.asJsonArray() - .stream() - .map(JsonValue::asJsonObject) - .map(key::adaptFromJson) - .collect(toList()); + keysValue.asJsonArray() + .stream() + .map(JsonValue::asJsonObject) + .map(key::adaptFromJson) + .forEach(jwtOptions::key); break; case STRING: - keysURL = ((JsonString) keysValue).getString(); + jwtOptions.keys(KEYS_DEFAULT) + .keysURL(((JsonString) keysValue).getString()); + break; + default: break; } } else { + jwtOptions.keys(KEYS_DEFAULT); + if (issuer != null) { - keysURL = issuer.endsWith("/") + jwtOptions.keysURL(issuer.endsWith("/") ? String.format("%s.well-known/jwks.json", issuer) - : String.format("%s/.well-known/jwks.json", issuer); + : String.format("%s/.well-known/jwks.json", issuer)); } } - Duration challenge = object.containsKey(CHALLENGE_NAME) - ? 
Duration.ofSeconds(object.getInt(CHALLENGE_NAME)) - : null; + if (object.containsKey(CHALLENGE_NAME)) + { + jwtOptions.challenge(Duration.ofSeconds(object.getInt(CHALLENGE_NAME))); + } - return new JwtOptionsConfig(issuer, audience, keys, challenge, keysURL); + return jwtOptions.build(); } } diff --git a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandlerTest.java b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandlerTest.java index a859687c1a..90e9928cb4 100644 --- a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandlerTest.java +++ b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandlerTest.java @@ -18,7 +18,6 @@ import static io.aklivity.zilla.specs.guard.jwt.keys.JwtKeys.RFC7515_RS256; import static java.time.Duration.ofSeconds; import static java.util.Arrays.asList; -import static java.util.Collections.singletonList; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -46,8 +45,12 @@ public class JwtGuardHandlerTest public void shouldAuthorize() throws Exception { Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -74,8 +77,12 @@ public void shouldAuthorize() throws Exception public void shouldChallengeDuringChallengeWindow() throws Exception { Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", 
singletonList(RFC7515_RS256_CONFIG), challenge); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -98,8 +105,12 @@ public void shouldChallengeDuringChallengeWindow() throws Exception public void shouldNotChallengeDuringWindowWithoutSubject() throws Exception { Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -121,8 +132,12 @@ public void shouldNotChallengeDuringWindowWithoutSubject() throws Exception public void shouldNotChallengeBeforeChallengeWindow() throws Exception { Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -145,8 +160,12 @@ public void shouldNotChallengeBeforeChallengeWindow() throws Exception public void shouldNotChallengeAgainDuringChallengeWindow() throws Exception { Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); + 
JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -170,7 +189,11 @@ public void shouldNotChallengeAgainDuringChallengeWindow() throws Exception @Test public void shouldNotAuthorizeWhenAlgorithmDiffers() throws Exception { - JwtOptionsConfig options = new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), null); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); JwtClaims claims = new JwtClaims(); @@ -187,7 +210,11 @@ public void shouldNotAuthorizeWhenAlgorithmDiffers() throws Exception @Test public void shouldNotAuthorizeWhenSignatureInvalid() throws Exception { - JwtOptionsConfig options = new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), null); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); JwtClaims claims = new JwtClaims(); @@ -206,7 +233,11 @@ public void shouldNotAuthorizeWhenSignatureInvalid() throws Exception @Test public void shouldNotAuthorizeWhenIssuerDiffers() throws Exception { - JwtOptionsConfig options = new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), null); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new 
MutableLong(1L)::getAndIncrement, READ_KEYS_URL); JwtClaims claims = new JwtClaims(); @@ -223,7 +254,11 @@ public void shouldNotAuthorizeWhenIssuerDiffers() throws Exception @Test public void shouldNotAuthorizeWhenAudienceDiffers() throws Exception { - JwtOptionsConfig options = new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), null); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); JwtClaims claims = new JwtClaims(); @@ -240,7 +275,11 @@ public void shouldNotAuthorizeWhenAudienceDiffers() throws Exception @Test public void shouldNotAuthorizeWhenExpired() throws Exception { - JwtOptionsConfig options = new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), null); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -260,7 +299,11 @@ public void shouldNotAuthorizeWhenExpired() throws Exception @Test public void shouldNotAuthorizeWhenNotYetValid() throws Exception { - JwtOptionsConfig options = new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), null); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -281,8 +324,12 @@ public void shouldNotAuthorizeWhenNotYetValid() throws Exception public void shouldNotVerifyAuthorizedWhenRolesInsufficient() throws Exception { Duration challenge = 
ofSeconds(30L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); JwtClaims claims = new JwtClaims(); @@ -302,8 +349,12 @@ public void shouldNotVerifyAuthorizedWhenRolesInsufficient() throws Exception public void shouldReauthorizeWhenExpirationLater() throws Exception { Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -332,8 +383,12 @@ public void shouldReauthorizeWhenExpirationLater() throws Exception public void shouldReauthorizeWhenScopeBroader() throws Exception { Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -363,8 +418,12 @@ public void shouldReauthorizeWhenScopeBroader() throws Exception public void shouldNotReauthorizeWhenExpirationEarlier() throws Exception { Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new 
JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -393,8 +452,12 @@ public void shouldNotReauthorizeWhenExpirationEarlier() throws Exception public void shouldNotReauthorizeWhenScopeNarrower() throws Exception { Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -425,8 +488,12 @@ public void shouldNotReauthorizeWhenScopeNarrower() throws Exception public void shouldNotReauthorizeWhenSubjectDiffers() throws Exception { Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -457,8 +524,12 @@ public void shouldNotReauthorizeWhenSubjectDiffers() throws Exception public void shouldNotReauthorizeWhenContextDiffers() throws Exception { Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", 
singletonList(RFC7515_RS256_CONFIG), challenge); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); @@ -488,8 +559,12 @@ public void shouldNotReauthorizeWhenContextDiffers() throws Exception public void shouldDeauthorize() throws Exception { Duration challenge = ofSeconds(30L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build(); JwtGuardHandler guard = new JwtGuardHandler(options, new MutableLong(1L)::getAndIncrement, READ_KEYS_URL); Instant now = Instant.now(); diff --git a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardTest.java b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardTest.java index ccf797c2a0..be99860433 100644 --- a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardTest.java +++ b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardTest.java @@ -18,8 +18,6 @@ import static io.aklivity.zilla.runtime.guard.jwt.internal.keys.JwtKeyConfigs.RFC7515_RS256_CONFIG; import static io.aklivity.zilla.specs.guard.jwt.keys.JwtKeys.RFC7515_RS256; import static java.time.Duration.ofSeconds; -import static java.util.Arrays.asList; -import static java.util.Collections.singletonList; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -49,7 +47,11 @@ public class JwtGuardTest @Test public void shouldNotVerifyMissingContext() throws Exception { 
- GuardedConfig guarded = new GuardedConfig("test0", asList("read:stream", "write:stream")); + GuardedConfig guarded = GuardedConfig.builder() + .name("test0") + .role("read:stream") + .role("write:stream") + .build(); Configuration config = new Configuration(); GuardFactory factory = GuardFactory.instantiate(); @@ -67,7 +69,11 @@ public void shouldNotVerifyMissingHandler() throws Exception when(engine.index()).thenReturn(0); - GuardedConfig guarded = new GuardedConfig("test0", asList("read:stream", "write:stream")); + GuardedConfig guarded = GuardedConfig.builder() + .name("test0") + .role("read:stream") + .role("write:stream") + .build(); Configuration config = new Configuration(); GuardFactory factory = GuardFactory.instantiate(); @@ -87,14 +93,22 @@ public void shouldNotVerifyMissingSession() throws Exception when(engine.index()).thenReturn(0); - GuardedConfig guarded = new GuardedConfig("test0", asList("read:stream", "write:stream")); + GuardedConfig guarded = GuardedConfig.builder() + .name("test0") + .role("read:stream") + .role("write:stream") + .build(); Configuration config = new Configuration(); GuardFactory factory = GuardFactory.instantiate(); Guard guard = factory.create("jwt", config); GuardContext context = guard.supply(engine); - context.attach(new GuardConfig("test0", "jwt", new JwtOptionsConfig(null, null, null, null))); + context.attach(GuardConfig.builder() + .name("test0") + .type("jwt") + .options(JwtOptionsConfig.builder().build()) + .build()); LongPredicate verifier = guard.verifier(s -> 0, guarded); @@ -108,7 +122,11 @@ public void shouldNotVerifyRolesWhenInsufficient() throws Exception when(engine.index()).thenReturn(0); - GuardedConfig guarded = new GuardedConfig("test0", asList("read:stream", "write:stream")); + GuardedConfig guarded = GuardedConfig.builder() + .name("test0") + .role("read:stream") + .role("write:stream") + .build(); Configuration config = new Configuration(); GuardFactory factory = GuardFactory.instantiate(); @@ 
-116,10 +134,16 @@ public void shouldNotVerifyRolesWhenInsufficient() throws Exception GuardContext context = guard.supply(engine); - Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); - GuardHandler handler = context.attach(new GuardConfig("test0", "jwt", options)); + GuardHandler handler = context.attach(GuardConfig.builder() + .name("test0") + .type("jwt") + .options(JwtOptionsConfig::builder) + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(ofSeconds(3L)) + .build() + .build()); LongPredicate verifier = guard.verifier(s -> 0, guarded); @@ -147,7 +171,11 @@ public void shouldVerifyRolesWhenExact() throws Exception when(engine.index()).thenReturn(0); when(engine.supplyAuthorizedId()).thenReturn(1L); - GuardedConfig guarded = new GuardedConfig("test0", asList("read:stream", "write:stream")); + GuardedConfig guarded = GuardedConfig.builder() + .name("test0") + .role("read:stream") + .role("write:stream") + .build(); Configuration config = new Configuration(); GuardFactory factory = GuardFactory.instantiate(); @@ -155,10 +183,16 @@ public void shouldVerifyRolesWhenExact() throws Exception GuardContext context = guard.supply(engine); - Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); - GuardHandler handler = context.attach(new GuardConfig("test0", "jwt", options)); + GuardHandler handler = context.attach(GuardConfig.builder() + .name("test0") + .type("jwt") + .options(JwtOptionsConfig::builder) + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(ofSeconds(3L)) + .build() + .build()); LongPredicate verifier = guard.verifier(s -> 0, guarded); @@ -186,7 +220,10 @@ public void shouldVerifyRolesWhenSuperset() throws Exception 
when(engine.index()).thenReturn(0); when(engine.supplyAuthorizedId()).thenReturn(1L); - GuardedConfig guarded = new GuardedConfig("test0", asList("read:stream")); + GuardedConfig guarded = GuardedConfig.builder() + .name("test0") + .role("read:stream") + .build(); Configuration config = new Configuration(); GuardFactory factory = GuardFactory.instantiate(); @@ -194,10 +231,16 @@ public void shouldVerifyRolesWhenSuperset() throws Exception GuardContext context = guard.supply(engine); - Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); - GuardHandler handler = context.attach(new GuardConfig("test0", "jwt", options)); + GuardHandler handler = context.attach(GuardConfig.builder() + .name("test0") + .type("jwt") + .options(JwtOptionsConfig::builder) + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(ofSeconds(3L)) + .build() + .build()); LongPredicate verifier = guard.verifier(s -> 0, guarded); @@ -225,7 +268,9 @@ public void shouldVerifyRolesWhenEmpty() throws Exception when(engine.index()).thenReturn(0); when(engine.supplyAuthorizedId()).thenReturn(1L); - GuardedConfig guarded = new GuardedConfig("test0", asList()); + GuardedConfig guarded = GuardedConfig.builder() + .name("test0") + .build(); Configuration config = new Configuration(); GuardFactory factory = GuardFactory.instantiate(); @@ -233,10 +278,16 @@ public void shouldVerifyRolesWhenEmpty() throws Exception GuardContext context = guard.supply(engine); - Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); - GuardHandler handler = context.attach(new GuardConfig("test0", "jwt", options)); + GuardHandler handler = context.attach(GuardConfig.builder() + .name("test0") + .type("jwt") + .options(JwtOptionsConfig::builder) + .issuer("test 
issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(ofSeconds(3L)) + .build() + .build()); LongPredicate verifier = guard.verifier(s -> 0, guarded); @@ -269,14 +320,22 @@ public void shouldVerifyWhenIndexDiffers() throws Exception GuardContext context = guard.supply(engine); - Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); - GuardConfig config = new GuardConfig("test0", "jwt", options); + GuardConfig config = GuardConfig.builder() + .name("test0") + .type("jwt") + .options(JwtOptionsConfig::builder) + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(ofSeconds(3L)) + .build() + .build(); config.id = 0x11L; GuardHandler handler = context.attach(config); - GuardedConfig guarded = new GuardedConfig("test0", asList()); + GuardedConfig guarded = GuardedConfig.builder() + .name("test0") + .build(); guarded.id = config.id; LongPredicate verifier = guard.verifier(id -> (int)(id >> 4), guarded); @@ -304,7 +363,9 @@ public void shouldIdentify() throws Exception when(engine.index()).thenReturn(0); when(engine.supplyAuthorizedId()).thenReturn(1L); - GuardedConfig guarded = new GuardedConfig("test0", asList()); + GuardedConfig guarded = GuardedConfig.builder() + .name("test0") + .build(); Configuration config = new Configuration(); GuardFactory factory = GuardFactory.instantiate(); @@ -312,10 +373,16 @@ public void shouldIdentify() throws Exception GuardContext context = guard.supply(engine); - Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); - GuardHandler handler = context.attach(new GuardConfig("test0", "jwt", options)); + GuardHandler handler = context.attach(GuardConfig.builder() + .name("test0") + .type("jwt") + .options(JwtOptionsConfig::builder) + .issuer("test 
issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(ofSeconds(3L)) + .build() + .build()); LongFunction identifier = guard.identifier(s -> 0, guarded); @@ -349,12 +416,21 @@ public void shouldIdentifyWhenIndexDiffers() throws Exception GuardContext context = guard.supply(engine); Duration challenge = ofSeconds(3L); - JwtOptionsConfig options = - new JwtOptionsConfig("test issuer", "testAudience", singletonList(RFC7515_RS256_CONFIG), challenge); - GuardConfig config = new GuardConfig("test0", "jwt", options); + GuardConfig config = GuardConfig.builder() + .name("test0") + .type("jwt") + .options(JwtOptionsConfig::builder) + .issuer("test issuer") + .audience("testAudience") + .key(RFC7515_RS256_CONFIG) + .challenge(challenge) + .build() + .build(); config.id = 0x11L; - GuardedConfig guarded = new GuardedConfig("test0", asList()); + GuardedConfig guarded = GuardedConfig.builder() + .name("test0") + .build(); guarded.id = config.id; GuardHandler handler = context.attach(config); diff --git a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapterTest.java b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapterTest.java index cd238e5577..e1df7ba43a 100644 --- a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapterTest.java +++ b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/config/JwtOptionsConfigAdapterTest.java @@ -14,7 +14,6 @@ */ package io.aklivity.zilla.runtime.guard.jwt.internal.config; -import static java.util.Arrays.asList; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -31,7 +30,6 @@ import org.junit.Before; import org.junit.Test; -import io.aklivity.zilla.runtime.guard.jwt.config.JwtKeyConfig; import 
io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; public class JwtOptionsConfigAdapterTest @@ -106,24 +104,31 @@ public void shouldReadOptions() @Test public void shouldWriteOptions() { - JwtKeyConfig key0 = new JwtKeyConfig( - "EC", "1", "enc", - null, null, null, - "P-256", "MKBCTNIcKUSDii11ySs3526iDZ8AiTo7Tu6KPAqv7D4", "4Etl6SRW2YiLUrN5vfvVHuhp7x8PxltmWWlbbM4IFyM"); - JwtKeyConfig key1 = new JwtKeyConfig( - "RSA", "2011-04-29", null, - "0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx" + - "4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMs" + - "tn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2" + - "QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbI" + - "SD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqb" + - "w0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw", "AQAB", "RS256", - null, null, null); - JwtOptionsConfig options = new JwtOptionsConfig( - "https://auth.example.com", - "https://api.example.com", - asList(key0, key1), - Duration.ofSeconds(30)); + JwtOptionsConfig options = JwtOptionsConfig.builder() + .issuer("https://auth.example.com") + .audience("https://api.example.com") + .key() + .kty("EC") + .kid("1") + .use("enc") + .crv("P-256") + .x("MKBCTNIcKUSDii11ySs3526iDZ8AiTo7Tu6KPAqv7D4") + .y("4Etl6SRW2YiLUrN5vfvVHuhp7x8PxltmWWlbbM4IFyM") + .build() + .key() + .kty("RSA") + .kid("2011-04-29") + .n("0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx" + + "4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMs" + + "tn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2" + + "QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbI" + + "SD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqb" + + "w0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw") + .e("AQAB") + .alg("RS256") + .build() + .challenge(Duration.ofSeconds(30)) + .build(); String text = jsonb.toJson(options); diff --git 
a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/keys/JwtKeyConfigs.java b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/keys/JwtKeyConfigs.java index 61ab7ccad1..8fcce6ece3 100644 --- a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/keys/JwtKeyConfigs.java +++ b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/keys/JwtKeyConfigs.java @@ -24,33 +24,29 @@ public final class JwtKeyConfigs static { // RFC 7515, section A.2.1 - RFC7515_RS256_CONFIG = new JwtKeyConfig( - "RSA", - "test", - "verify", - "ofgWCuLjybRlzo0tZWJjNiuSfb4p4fAkd_wWJcyQoTbji9k0l8W26mPddx" + - "HmfHQp-Vaw-4qPCJrcS2mJPMEzP1Pt0Bm4d4QlL-yRT-SFd2lZS-pCgNMs" + - "D1W_YpRPEwOWvG6b32690r2jZ47soMZo9wGzjb_7OMg0LOL-bSf63kpaSH" + - "SXndS5z5rexMdbBYUsLA9e-KXBdQOS-UTo7WTBEMa2R2CapHg665xsmtdV" + - "MTBQY4uDZlxvb3qCo5ZwKh9kG4LT6_I5IhlJH7aGhyxXFvUK-DWNmoudF8" + - "NAco9_h9iaGNj8q2ethFkMLs91kzk2PAcDTW9gb54h4FRWyuXpoQ", - "AQAB", - "RS256", - null, - null, - null); + RFC7515_RS256_CONFIG = JwtKeyConfig.builder() + .kty("RSA") + .kid("test") + .use("verify") + .n("ofgWCuLjybRlzo0tZWJjNiuSfb4p4fAkd_wWJcyQoTbji9k0l8W26mPddx" + + "HmfHQp-Vaw-4qPCJrcS2mJPMEzP1Pt0Bm4d4QlL-yRT-SFd2lZS-pCgNMs" + + "D1W_YpRPEwOWvG6b32690r2jZ47soMZo9wGzjb_7OMg0LOL-bSf63kpaSH" + + "SXndS5z5rexMdbBYUsLA9e-KXBdQOS-UTo7WTBEMa2R2CapHg665xsmtdV" + + "MTBQY4uDZlxvb3qCo5ZwKh9kG4LT6_I5IhlJH7aGhyxXFvUK-DWNmoudF8" + + "NAco9_h9iaGNj8q2ethFkMLs91kzk2PAcDTW9gb54h4FRWyuXpoQ") + .e("AQAB") + .alg("RS256") + .build(); // RFC 7515, section A.3.1 - RFC7515_ES256_CONFIG = new JwtKeyConfig( - "RSA", - "test", - "verify", - null, - null, - null, - "P-256", - "f83OJ3D2xF1Bg8vub9tLe1gHMzV76e8Tus9uPHvRVEU", - "x_FEzRu9m36HLN_tue659LNpXW6pCyStikYjKIWI5a0"); + RFC7515_ES256_CONFIG = JwtKeyConfig.builder() + .kty("RSA") + .kid("test") + .use("verify") + .crv("P-256") + .x("f83OJ3D2xF1Bg8vub9tLe1gHMzV76e8Tus9uPHvRVEU") + 
.y("x_FEzRu9m36HLN_tue659LNpXW6pCyStikYjKIWI5a0") + .build(); } private JwtKeyConfigs() diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfig.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfig.java index 1d5fb70ca9..e124c1ec23 100644 --- a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfig.java +++ b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfig.java @@ -15,15 +15,28 @@ */ package io.aklivity.zilla.runtime.vault.filesystem.config; +import java.util.function.Function; + import io.aklivity.zilla.runtime.engine.config.OptionsConfig; -public class FileSystemOptionsConfig extends OptionsConfig +public final class FileSystemOptionsConfig extends OptionsConfig { public final FileSystemStoreConfig keys; public final FileSystemStoreConfig trust; public final FileSystemStoreConfig signers; - public FileSystemOptionsConfig( + public static FileSystemOptionsConfigBuilder builder() + { + return new FileSystemOptionsConfigBuilder<>(FileSystemOptionsConfig.class::cast); + } + + public static FileSystemOptionsConfigBuilder builder( + Function mapper) + { + return new FileSystemOptionsConfigBuilder<>(mapper); + } + + FileSystemOptionsConfig( FileSystemStoreConfig keys, FileSystemStoreConfig trust, FileSystemStoreConfig signers) diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfigBuilder.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfigBuilder.java new file mode 100644 index 0000000000..5a5879960c --- /dev/null +++ b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfigBuilder.java @@ -0,0 +1,78 @@ +/* + * Copyright 
2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.vault.filesystem.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.OptionsConfig; + +public final class FileSystemOptionsConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private FileSystemStoreConfig keys; + private FileSystemStoreConfig trust; + private FileSystemStoreConfig signers; + + FileSystemOptionsConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public FileSystemStoreConfigBuilder> keys() + { + return new FileSystemStoreConfigBuilder<>(this::keys); + } + + public FileSystemStoreConfigBuilder> trust() + { + return new FileSystemStoreConfigBuilder<>(this::trust); + } + + public FileSystemStoreConfigBuilder> signers() + { + return new FileSystemStoreConfigBuilder<>(this::signers); + } + + public FileSystemOptionsConfigBuilder keys( + FileSystemStoreConfig keys) + { + this.keys = keys; + return this; + } + + public FileSystemOptionsConfigBuilder trust( + FileSystemStoreConfig trust) + { + this.trust = trust; + return this; + } + + public FileSystemOptionsConfigBuilder signers( + FileSystemStoreConfig signers) + { + this.signers = signers; + return this; + } + + @Override + public T build() + { + return mapper.apply(new FileSystemOptionsConfig(keys, trust, 
signers)); + } +} diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfig.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfig.java index e149e22f64..3630dbb331 100644 --- a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfig.java +++ b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfig.java @@ -15,13 +15,20 @@ */ package io.aklivity.zilla.runtime.vault.filesystem.config; -public class FileSystemStoreConfig +import static java.util.function.Function.identity; + +public final class FileSystemStoreConfig { public final String store; public final String type; public final String password; - public FileSystemStoreConfig( + public static FileSystemStoreConfigBuilder builder() + { + return new FileSystemStoreConfigBuilder<>(identity()); + } + + FileSystemStoreConfig( String store, String type, String password) diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfigBuilder.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfigBuilder.java new file mode 100644 index 0000000000..184480bb3e --- /dev/null +++ b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfigBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.vault.filesystem.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public final class FileSystemStoreConfigBuilder implements ConfigBuilder +{ + private final Function mapper; + + private String store; + private String type; + private String password; + + FileSystemStoreConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + public FileSystemStoreConfigBuilder store( + String store) + { + this.store = store; + return this; + } + + public FileSystemStoreConfigBuilder type( + String type) + { + this.type = type; + return this; + } + + public FileSystemStoreConfigBuilder password( + String password) + { + this.password = password; + return this; + } + + @Override + public T build() + { + return mapper.apply(new FileSystemStoreConfig(store, type, password)); + } +} diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapter.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapter.java index 89aaf54c9b..c73a3a69e1 100644 --- a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapter.java +++ b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapter.java @@ -23,7 +23,7 @@ import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import 
io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi; import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfig; -import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemStoreConfig; +import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfigBuilder; import io.aklivity.zilla.runtime.vault.filesystem.internal.FileSystemVault; public final class FileSystemOptionsConfigAdapter implements OptionsConfigAdapterSpi, JsonbAdapter @@ -76,16 +76,23 @@ public JsonObject adaptToJson( public OptionsConfig adaptFromJson( JsonObject object) { - FileSystemStoreConfig keys = object.containsKey(KEYS_NAME) - ? store.adaptFromJson(object.getJsonObject(KEYS_NAME)) - : null; - FileSystemStoreConfig trust = object.containsKey(TRUST_NAME) - ? store.adaptFromJson(object.getJsonObject(TRUST_NAME)) - : null; - FileSystemStoreConfig signers = object.containsKey(SIGNERS_NAME) - ? store.adaptFromJson(object.getJsonObject(SIGNERS_NAME)) - : null; - - return new FileSystemOptionsConfig(keys, trust, signers); + FileSystemOptionsConfigBuilder fsOptions = FileSystemOptionsConfig.builder(); + + if (object.containsKey(KEYS_NAME)) + { + fsOptions.keys(store.adaptFromJson(object.getJsonObject(KEYS_NAME))); + } + + if (object.containsKey(TRUST_NAME)) + { + fsOptions.trust(store.adaptFromJson(object.getJsonObject(TRUST_NAME))); + } + + if (object.containsKey(SIGNERS_NAME)) + { + fsOptions.signers(store.adaptFromJson(object.getJsonObject(SIGNERS_NAME))); + } + + return fsOptions.build(); } } diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStoreConfigAdapter.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStoreConfigAdapter.java index f69447acf3..ed66c57afc 100644 --- a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStoreConfigAdapter.java +++ 
b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemStoreConfigAdapter.java @@ -21,6 +21,7 @@ import jakarta.json.bind.adapter.JsonbAdapter; import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemStoreConfig; +import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemStoreConfigBuilder; public final class FileSystemStoreConfigAdapter implements JsonbAdapter { @@ -53,10 +54,19 @@ public JsonObject adaptToJson( public FileSystemStoreConfig adaptFromJson( JsonObject object) { - String store = object.getString(STORE_NAME); - String type = object.containsKey(TYPE_NAME) ? object.getString(TYPE_NAME) : null; - String password = object.containsKey(PASSWORD_NAME) ? object.getString(PASSWORD_NAME) : null; + FileSystemStoreConfigBuilder fsStore = FileSystemStoreConfig.builder() + .store(object.getString(STORE_NAME)); - return new FileSystemStoreConfig(store, type, password); + if (object.containsKey(TYPE_NAME)) + { + fsStore.type(object.getString(TYPE_NAME)); + } + + if (object.containsKey(PASSWORD_NAME)) + { + fsStore.password(object.getString(PASSWORD_NAME)); + } + + return fsStore.build(); } } diff --git a/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemVaultTest.java b/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemVaultTest.java index 341df5a648..fecdd376f6 100644 --- a/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemVaultTest.java +++ b/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemVaultTest.java @@ -26,16 +26,24 @@ import org.junit.Test; import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfig; -import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemStoreConfig; public class FileSystemVaultTest { @Test public void shouldResolveServer() throws 
Exception { - FileSystemStoreConfig keys = new FileSystemStoreConfig("stores/server/keys", "pkcs12", "generated"); - FileSystemStoreConfig trust = new FileSystemStoreConfig("stores/server/trust", "pkcs12", "generated"); - FileSystemOptionsConfig options = new FileSystemOptionsConfig(keys, trust, null); + FileSystemOptionsConfig options = FileSystemOptionsConfig.builder() + .keys() + .store("stores/server/keys") + .type("pkcs12") + .password("generated") + .build() + .trust() + .store("stores/server/trust") + .type("pkcs12") + .password("generated") + .build() + .build(); FileSystemVaultHandler vault = new FileSystemVaultHandler(options, FileSystemVaultTest.class::getResource); @@ -49,9 +57,18 @@ public void shouldResolveServer() throws Exception @Test public void shouldResolveClient() throws Exception { - FileSystemStoreConfig keys = new FileSystemStoreConfig("stores/client/keys", "pkcs12", "generated"); - FileSystemStoreConfig signers = new FileSystemStoreConfig("stores/server/trust", "pkcs12", "generated"); - FileSystemOptionsConfig options = new FileSystemOptionsConfig(keys, null, signers); + FileSystemOptionsConfig options = FileSystemOptionsConfig.builder() + .keys() + .store("stores/client/keys") + .type("pkcs12") + .password("generated") + .build() + .signers() + .store("stores/server/trust") + .type("pkcs12") + .password("generated") + .build() + .build(); FileSystemVaultHandler vault = new FileSystemVaultHandler(options, FileSystemVaultTest.class::getResource); diff --git a/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapterTest.java b/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapterTest.java index 6e7947a03f..f9e0d74826 100644 --- a/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapterTest.java +++ 
b/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapterTest.java @@ -80,7 +80,8 @@ public void shouldReadOptionsWithKeys() @Test public void shouldWriteOptions() { - FileSystemOptionsConfig options = new FileSystemOptionsConfig(null, null, null); + FileSystemOptionsConfig options = FileSystemOptionsConfig.builder() + .build(); String text = jsonb.toJson(options); @@ -91,8 +92,13 @@ public void shouldWriteOptions() @Test public void shouldWriteOptionsWithKeys() { - FileSystemStoreConfig keys = new FileSystemStoreConfig("localhost.p12", "pkcs12", "generated"); - FileSystemOptionsConfig options = new FileSystemOptionsConfig(keys, null, null); + FileSystemOptionsConfig options = FileSystemOptionsConfig.builder() + .keys() + .store("localhost.p12") + .type("pkcs12") + .password("generated") + .build() + .build(); String text = jsonb.toJson(options); From a9140950e772212274b7aa0bb0e6e10f3d522e74 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Thu, 10 Aug 2023 22:41:46 +0200 Subject: [PATCH 017/115] Add hashKey support to merged stream (#329) --- .../internal/stream/KafkaMergedFactory.java | 8 +- .../kafka/internal/stream/CacheMergedIT.java | 10 + .../kafka/internal/KafkaFunctions.java | 48 ++++ .../main/resources/META-INF/zilla/kafka.idl | 3 +- .../client.rpt | 105 ++++++++ .../server.rpt | 92 +++++++ .../client.rpt | 245 ++++++++++++++++++ .../server.rpt | 233 +++++++++++++++++ .../kafka/internal/KafkaFunctionsTest.java | 10 +- .../kafka/streams/application/MergedIT.java | 18 ++ 10 files changed, 768 insertions(+), 4 deletions(-) create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.dynamic.hash.key/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.dynamic.hash.key/server.rpt create 
mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.dynamic.hash.key/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.dynamic.hash.key/server.rpt diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java index ef921f56df..702792edc7 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java @@ -1192,9 +1192,10 @@ private void onMergedInitialData( assert kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED; final KafkaMergedDataExFW kafkaMergedDataEx = kafkaDataEx.merged(); final KafkaKeyFW key = kafkaMergedDataEx.key(); + final KafkaKeyFW hashKey = kafkaMergedDataEx.hashKey(); final KafkaOffsetFW partition = kafkaMergedDataEx.partition(); final int partitionId = partition.partitionId(); - final int nextPartitionId = partitionId == DYNAMIC_PARTITION ? nextPartitionData(key) : partitionId; + final int nextPartitionId = partitionId == DYNAMIC_PARTITION ? nextPartitionData(hashKey, key) : partitionId; final KafkaUnmergedProduceStream newProducer = findProducePartitionLeader(nextPartitionId); assert newProducer != null; // TODO @@ -1220,10 +1221,13 @@ private KafkaOffsetType asMaximumOffset( } private int nextPartitionData( + KafkaKeyFW hashKey, KafkaKeyFW key) { final int partitionCount = leadersByPartitionId.size(); - final int keyHash = key.length() != -1 ? defaultKeyHash(key) : nextNullKeyHashData++; + final int keyHash = hashKey.length() != -1 ? 
defaultKeyHash(hashKey) : + key.length() != -1 ? defaultKeyHash(key) : + nextNullKeyHashData++; final int partitionId = partitionCount > 0 ? (0x7fff_ffff & keyHash) % partitionCount : 0; return partitionId; diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java index fe4ebd9737..49985db410 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java @@ -403,6 +403,16 @@ public void shouldProduceMergedMessageValuesDynamicHashed() throws Exception k3po.finish(); } + @Test + @Configuration("cache.options.merged.yaml") + @Specification({ + "${app}/merged.produce.message.values.dynamic.hash.key/client", + "${app}/unmerged.produce.message.values.dynamic.hash.key/server"}) + public void shouldProduceMergedMessageValuesDynamicHashKey() throws Exception + { + k3po.finish(); + } + @Test @Configuration("cache.options.merged.yaml") @Specification({ diff --git a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java index 5d5516f7b5..8fc2295ab0 100644 --- a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java +++ b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java @@ -1218,6 +1218,7 @@ public KafkaDataExBuilder build() public final class KafkaMergedDataExBuilder { private final DirectBuffer keyRO = new UnsafeBuffer(0, 0); + private final DirectBuffer hashKeyRO = new UnsafeBuffer(0, 0); private final DirectBuffer nameRO = new UnsafeBuffer(0, 0); private final 
DirectBuffer valueRO = new UnsafeBuffer(0, 0); @@ -1306,6 +1307,23 @@ public KafkaMergedDataExBuilder key( return this; } + public KafkaMergedDataExBuilder hashKey( + String hashKey) + { + if (hashKey == null) + { + mergedDataExRW.hashKey(m -> m.length(-1) + .value((OctetsFW) null)); + } + else + { + hashKeyRO.wrap(hashKey.getBytes(UTF_8)); + mergedDataExRW.hashKey(k -> k.length(hashKeyRO.capacity()) + .value(hashKeyRO, 0, hashKeyRO.capacity())); + } + return this; + } + public KafkaMergedDataExBuilder delta( String deltaType, long ancestorOffset) @@ -1939,6 +1957,7 @@ public static final class KafkaDataExMatcherBuilder private final DirectBuffer bufferRO = new UnsafeBuffer(); private final DirectBuffer keyRO = new UnsafeBuffer(0, 0); + private final DirectBuffer hashKeyRO = new UnsafeBuffer(0, 0); private final DirectBuffer nameRO = new UnsafeBuffer(0, 0); private final DirectBuffer valueRO = new UnsafeBuffer(0, 0); @@ -2404,6 +2423,7 @@ public final class KafkaMergedDataExMatcherBuilder private Array32FW.Builder progressRW; private KafkaDeltaFW.Builder deltaRW; private KafkaKeyFW.Builder keyRW; + private KafkaKeyFW.Builder hashKeyRW; private Array32FW.Builder headersRW; private KafkaMergedDataExMatcherBuilder() @@ -2495,6 +2515,27 @@ public KafkaMergedDataExMatcherBuilder key( return this; } + public KafkaMergedDataExMatcherBuilder hashKey( + String hashKey) + { + assert hashKeyRW == null; + hashKeyRW = new KafkaKeyFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + + if (hashKey == null) + { + hashKeyRW.length(-1) + .value((OctetsFW) null); + } + else + { + hashKeyRO.wrap(hashKey.getBytes(UTF_8)); + hashKeyRW.length(hashKeyRO.capacity()) + .value(hashKeyRO, 0, hashKeyRO.capacity()); + } + + return this; + } + public KafkaMergedDataExMatcherBuilder delta( String delta, long ancestorOffset) @@ -2671,6 +2712,7 @@ private boolean match( matchDeferred(mergedDataEx) && matchTimestamp(mergedDataEx) && matchKey(mergedDataEx) && + matchHashKey(mergedDataEx) 
&& matchDelta(mergedDataEx) && matchHeaders(mergedDataEx) && matchFilters(mergedDataEx); @@ -2706,6 +2748,12 @@ private boolean matchKey( return keyRW == null || keyRW.build().equals(mergedDataEx.key()); } + private boolean matchHashKey( + final KafkaMergedDataExFW mergedDataEx) + { + return hashKeyRW == null || hashKeyRW.build().equals(mergedDataEx.hashKey()); + } + private boolean matchDelta( final KafkaMergedDataExFW mergedDataEx) { diff --git a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl index 9f4a432235..c8a61b2afe 100644 --- a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl +++ b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl @@ -229,10 +229,11 @@ scope kafka { int32 deferred = 0; // INIT only (TODO: move to DATA frame) int64 timestamp = 0; // INIT only - int64 filters = -1; // INIT only + int64 filters = -1; // INIT only KafkaOffset partition; // INIT only KafkaOffset[] progress; // INIT only KafkaKey key; // INIT only + KafkaKey hashKey; // INIT only KafkaDelta delta; // INIT + FIN KafkaHeader[] headers; // INIT + FIN (produce), INIT only (fetch) } diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.dynamic.hash.key/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.dynamic.hash.key/client.rpt new file mode 100644 index 0000000000..831b80ab24 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.dynamic.hash.key/client.rpt @@ -0,0 +1,105 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(newTimestamp) + .partition(-1, -1) + .key("a") + .hashKey("key7") + .build() + .build()} +write "Hello, world #A1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(newTimestamp) + .partition(-1, -1) + .key("b") + .hashKey("key8") + .build() + .build()} +write "Hello, world #B1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(newTimestamp) + .partition(-1, -1) + .key("a") + .hashKey("key7") + .build() + .build()} +write "Hello, world #A2" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(newTimestamp) + .partition(-1, -1) + .key("b") + .hashKey("key8") + .build() + .build()} +write "Hello, world #B2" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(newTimestamp) + .partition(-1, -1) + 
.key("c") + .hashKey("key9") + .build() + .build()} +write "Hello, world #C1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(newTimestamp) + .partition(-1, -1) + .key("c") + .hashKey("key9") + .build() + .build()} +write "Hello, world #C2" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.dynamic.hash.key/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.dynamic.hash.key/server.rpt new file mode 100644 index 0000000000..738cd0235c --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.message.values.dynamic.hash.key/server.rpt @@ -0,0 +1,92 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 16 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .partition(-1, -1) + .key("a") + .hashKey("key7") + .build() + .build()} +read "Hello, world #A1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .partition(-1, -1) + .key("b") + .hashKey("key8") + .build() + .build()} +read "Hello, world #B1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .partition(-1, -1) + .key("a") + .hashKey("key7") + .build() + .build()} +read "Hello, world #A2" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .partition(-1, -1) + .key("b") + .hashKey("key8") + .build() + .build()} +read "Hello, world #B2" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .partition(-1, -1) + .key("c") + .hashKey("key9") + .build() + .build()} +read "Hello, world #C1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .partition(-1, -1) + .key("c") + .hashKey("key9") + .build() + .build()} +read "Hello, world #C2" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.dynamic.hash.key/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.dynamic.hash.key/client.rpt new file mode 100644 index 0000000000..2b07e51ddb --- /dev/null +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.dynamic.hash.key/client.rpt @@ -0,0 +1,245 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + 
.config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} + +read notify RECEIVED_CONFIG + +connect await RECEIVED_CONFIG + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .partition(1, 2) + .partition(2, 3) + .build() + .build()} +read notify PARTITION_COUNT_3 + +connect await PARTITION_COUNT_3 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + option zilla:affinity 1 + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("a") + .build() + .build()} +write "Hello, world #A1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("a") + .build() + .build()} +write "Hello, world #A2" +write flush + +connect await PARTITION_COUNT_3 + "zilla://streams/app1" + option zilla:window 8192 + option 
zilla:transmission "half-duplex" + option zilla:affinity 2 + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(1) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(1) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("b") + .build() + .build()} +write "Hello, world #B1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("b") + .build() + .build()} +write "Hello, world #B2" +write flush + +connect await PARTITION_COUNT_3 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + option zilla:affinity 3 + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(2) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(2) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("c") + .build() + .build()} +write "Hello, world #C1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("c") + .build() + .build()} +write "Hello, world #C2" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.dynamic.hash.key/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.dynamic.hash.key/server.rpt new file 
mode 100644 index 0000000000..f86249e0dd --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.produce.message.values.dynamic.hash.key/server.rpt @@ -0,0 +1,233 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app1" + option zilla:window 64 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() 
+ .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .partition(1, 2) + .partition(2, 3) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("a") + .build() + .build()} +read "Hello, world #A1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("a") + .build() + .build()} +read "Hello, world #A2" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(1) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + 
.typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(1) + .build() + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("b") + .build() + .build()} +read "Hello, world #B1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("b") + .build() + .build()} +read "Hello, world #B2" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(2) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(2) + .build() + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("c") + .build() + .build()} +read "Hello, world #C1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("c") + .build() + .build()} +read "Hello, world #C2" diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java index ce08684543..010262a753 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java @@ -448,6 +448,7 @@ public void shouldGenerateMergedDataExtension() .partition(0, 0L) .progress(0, 1L) .key("match") + .hashKey("hashKey") .header("name", "value") .build() .build(); @@ -485,6 +486,10 @@ public void shouldGenerateMergedDataExtension() .get((b, o, m) 
-> b.getStringWithoutLengthUtf8(o, m - o))) && "value".equals(h.value() .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))))); + + assertEquals("hashKey", mergedDataEx.hashKey() + .value() + .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))); } @Test @@ -983,6 +988,7 @@ public void shouldMatchMergedDataExtension() throws Exception .timestamp(12345678L) .key("match") .header("name", "value") + .hashKey("hashKey") .build() .build(); @@ -996,8 +1002,9 @@ public void shouldMatchMergedDataExtension() throws Exception .progressItem(p -> p.partitionId(0).partitionOffset(1L)) .key(k -> k.length(5) .value(v -> v.set("match".getBytes(UTF_8)))) + .hashKey(k -> k.length(7) + .value(v -> v.set("hashKey".getBytes(UTF_8)))) .delta(d -> d.type(t -> t.set(KafkaDeltaType.NONE))) - .headersItem(h -> h.nameLen(4) .name(n -> n.set("name".getBytes(UTF_8))) .valueLen(5) @@ -1399,6 +1406,7 @@ public void shouldMatchMergedDataExtensionNullKey() throws Exception BytesMatcher matcher = KafkaFunctions.matchDataEx() .merged() .key(null) + .hashKey(null) .build() .build(); diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java index 2fb3d7ae0f..a6fd7859e0 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java @@ -270,6 +270,15 @@ public void shouldProduceMergedMessageValuesDynamicHashed() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/merged.produce.message.values.dynamic.hash.key/client", + "${app}/merged.produce.message.values.dynamic.hash.key/server"}) + public void shouldProduceMergedMessageValuesDynamicHashKey() throws Exception + { + k3po.finish(); + } + @Test @Specification({ 
"${app}/merged.produce.message.flags.incomplete/client", @@ -489,6 +498,15 @@ public void shouldProduceUnmergedMessageValuesDynamicHashed() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/unmerged.produce.message.values.dynamic.hash.key/client", + "${app}/unmerged.produce.message.values.dynamic.hash.key/server"}) + public void shouldProduceUnMergedMessageValuesDynamicHashKey() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${app}/unmerged.fetch.server.sent.close/client", From 88237c04a751c3dae9ee6d896927740635771585 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 10 Aug 2023 14:17:45 -0700 Subject: [PATCH 018/115] Use explicit version for docker base image --- cloud/docker-image/src/main/docker/incubator/Dockerfile | 2 +- cloud/docker-image/src/main/docker/release/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/docker-image/src/main/docker/incubator/Dockerfile b/cloud/docker-image/src/main/docker/incubator/Dockerfile index 7a0f47e9c0..e5afbea60f 100644 --- a/cloud/docker-image/src/main/docker/incubator/Dockerfile +++ b/cloud/docker-image/src/main/docker/incubator/Dockerfile @@ -27,7 +27,7 @@ RUN apk add --no-cache wget RUN ./zpmw install --debug --exclude-remote-repositories RUN ./zpmw clean --keep-image -FROM alpine +FROM alpine:3.18.2 COPY --from=build /.zpm /opt/zilla/.zpm COPY --from=build /zilla /opt/zilla/zilla diff --git a/cloud/docker-image/src/main/docker/release/Dockerfile b/cloud/docker-image/src/main/docker/release/Dockerfile index 7a0f47e9c0..e5afbea60f 100644 --- a/cloud/docker-image/src/main/docker/release/Dockerfile +++ b/cloud/docker-image/src/main/docker/release/Dockerfile @@ -27,7 +27,7 @@ RUN apk add --no-cache wget RUN ./zpmw install --debug --exclude-remote-repositories RUN ./zpmw clean --keep-image -FROM alpine +FROM alpine:3.18.2 COPY --from=build /.zpm /opt/zilla/.zpm COPY --from=build /zilla /opt/zilla/zilla From 
0cd07e09597bdc9d9f2e546b81739dc64293ddb3 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 10 Aug 2023 14:29:02 -0700 Subject: [PATCH 019/115] Configure dependabot ecosystems --- .github/dependabot.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..9af6af57da --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,10 @@ +version: 2 +updates: +- package-ecosystem: maven + directory: / + schedule: + interval: daily +- package-ecosystem: docker + directory: /cloud/docker-image/src/main/docker + schedule: + interval: daily From b98e8c3368b0d711fddacb43575f05d549c7286e Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 10 Aug 2023 15:01:08 -0700 Subject: [PATCH 020/115] Enable github actions dependency scanning --- .github/dependabot.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9af6af57da..40dbaf2c4a 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -8,3 +8,7 @@ updates: directory: /cloud/docker-image/src/main/docker schedule: interval: daily +- package-ecosystem: github-actions + directory: / + schedule: + interval: daily From 883993cb2a30c17430a51137a7649396034039d5 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 10 Aug 2023 15:06:11 -0700 Subject: [PATCH 021/115] Create codeql.yml --- .github/workflows/codeql.yml | 82 ++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 .github/workflows/codeql.yml diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000000..e4a7f77bca --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,82 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. 
+# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ "develop", "main" ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ "develop" ] + schedule: + - cron: '27 10 * * 4' + +jobs: + analyze: + name: Analyze + # Runner size impacts CodeQL analysis time. To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners + # Consider using larger runners for possible analysis time improvements. + runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'java', 'javascript' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby', 'swift' ] + # Use only 'java' to analyze code written in Java, Kotlin or both + # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. 
+ # Prefix the list here with "+" to use these queries and those in the config file. + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + + # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v2 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. + + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 + with: + category: "/language:${{matrix.language}}" From a0a6273bcd30529d3f6e73ccfbe1e3b2ec69fad3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 15:06:52 -0700 Subject: [PATCH 022/115] Bump com.mycila:license-maven-plugin from 4.1 to 4.2 (#334) Bumps com.mycila:license-maven-plugin from 4.1 to 4.2. --- updated-dependencies: - dependency-name: com.mycila:license-maven-plugin dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 1e1a529cb8..61d64ff6af 100644 --- a/pom.xml +++ b/pom.xml @@ -279,7 +279,7 @@ com.mycila license-maven-plugin - 4.1 + 4.2
COPYRIGHT
From b32c8e2c6743bd408436c3707941e9939e411aa5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 15:08:39 -0700 Subject: [PATCH 023/115] Bump byteman.version from 4.0.20 to 4.0.21 (#332) Bumps `byteman.version` from 4.0.20 to 4.0.21. Updates `org.jboss.byteman:byteman` from 4.0.20 to 4.0.21 - [Commits](https://github.com/adinn/byteman/compare/4.0.20...4.0.21) Updates `org.jboss.byteman:byteman-submit` from 4.0.20 to 4.0.21 Updates `org.jboss.byteman:byteman-install` from 4.0.20 to 4.0.21 Updates `org.jboss.byteman:byteman-bmunit` from 4.0.20 to 4.0.21 Updates `org.jboss.byteman:byteman-rulecheck-maven-plugin` from 4.0.20 to 4.0.21 --- updated-dependencies: - dependency-name: org.jboss.byteman:byteman dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: org.jboss.byteman:byteman-submit dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: org.jboss.byteman:byteman-install dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: org.jboss.byteman:byteman-bmunit dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: org.jboss.byteman:byteman-rulecheck-maven-plugin dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 61d64ff6af..c3fe108572 100644 --- a/pom.xml +++ b/pom.xml @@ -47,7 +47,7 @@ 4.11.1 1.6.0 5.8.2 - 4.0.20 + 4.0.21 2.6.0 5.3.1 3.1.0 From e9f214d4cecdbc0ce0b6f6b224c26d6fa69136e6 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 10 Aug 2023 15:12:36 -0700 Subject: [PATCH 024/115] Configure codeql langage matrix --- .github/workflows/codeql.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index e4a7f77bca..29e42d03b9 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -38,7 +38,7 @@ jobs: strategy: fail-fast: false matrix: - language: [ 'java', 'javascript' ] + language: [ 'java' ] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby', 'swift' ] # Use only 'java' to analyze code written in Java, Kotlin or both # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both From 37e204f72d0452f6a7d1d2cdf6b88b372dc82fbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 15:53:32 -0700 Subject: [PATCH 025/115] Bump antlr4.version from 4.11.1 to 4.13.0 (#331) Bumps `antlr4.version` from 4.11.1 to 4.13.0. 
Updates `org.antlr:antlr4-runtime` from 4.11.1 to 4.13.0 - [Release notes](https://github.com/antlr/antlr4/releases) - [Changelog](https://github.com/antlr/antlr4/blob/dev/doc/go-changes.md) - [Commits](https://github.com/antlr/antlr4/compare/4.11.1...4.13.0) Updates `org.antlr:antlr4-maven-plugin` from 4.11.1 to 4.13.0 - [Release notes](https://github.com/antlr/antlr4/releases) - [Changelog](https://github.com/antlr/antlr4/blob/dev/doc/go-changes.md) - [Commits](https://github.com/antlr/antlr4/compare/4.11.1...4.13.0) --- updated-dependencies: - dependency-name: org.antlr:antlr4-runtime dependency-type: direct:production update-type: version-update:semver-minor - dependency-name: org.antlr:antlr4-maven-plugin dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index c3fe108572..26198c40f8 100644 --- a/pom.xml +++ b/pom.xml @@ -44,7 +44,7 @@ UTF-8 io/aklivity/zilla/conf/checkstyle/configuration.xml io/aklivity/zilla/conf/checkstyle/suppressions.xml - 4.11.1 + 4.13.0 1.6.0 5.8.2 4.0.21 From 675ae263c3934715e92a7e8c81e4d0daed360bc2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 15:53:45 -0700 Subject: [PATCH 026/115] Bump org.apache.maven.plugins:maven-source-plugin from 3.0.1 to 3.3.0 (#333) Bumps [org.apache.maven.plugins:maven-source-plugin](https://github.com/apache/maven-source-plugin) from 3.0.1 to 3.3.0. - [Commits](https://github.com/apache/maven-source-plugin/compare/maven-source-plugin-3.0.1...maven-source-plugin-3.3.0) --- updated-dependencies: - dependency-name: org.apache.maven.plugins:maven-source-plugin dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 26198c40f8..6574522cd8 100644 --- a/pom.xml +++ b/pom.xml @@ -690,7 +690,7 @@ org.apache.maven.plugins maven-source-plugin - 3.0.1 + 3.3.0 From c799c965a09b486d2a08ef77e50f74b4c55b355e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 15:54:11 -0700 Subject: [PATCH 027/115] Bump org.moditect:moditect-maven-plugin from 1.0.0.RC1 to 1.0.0.Final (#335) Bumps [org.moditect:moditect-maven-plugin](https://github.com/moditect/moditect) from 1.0.0.RC1 to 1.0.0.Final. - [Release notes](https://github.com/moditect/moditect/releases) - [Commits](https://github.com/moditect/moditect/compare/1.0.0.RC1...1.0.0.Final) --- updated-dependencies: - dependency-name: org.moditect:moditect-maven-plugin dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 6574522cd8..907aa559f2 100644 --- a/pom.xml +++ b/pom.xml @@ -376,7 +376,7 @@ org.moditect moditect-maven-plugin - 1.0.0.RC1 + 1.0.0.Final src/main/moditect/module-info.java From 00e3381aa4f0b0e4095090310e2cab6b3976c57b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 15:54:28 -0700 Subject: [PATCH 028/115] Bump actions/cache from 2 to 3 (#336) Bumps [actions/cache](https://github.com/actions/cache) from 2 to 3. 
- [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 353ebfc537..9dcfc43bd1 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -23,7 +23,7 @@ jobs: with: java-version: ${{ matrix.java }} - name: Cache Maven packages - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: | ~/.m2/repository From 9d6e09ecc36384bccf820ed3fe2ec01c354394f5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 15:56:10 -0700 Subject: [PATCH 029/115] Bump actions/checkout from 2 to 3 (#337) Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9dcfc43bd1..3da7559742 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,7 +17,7 @@ jobs: steps: - name: Checkout GitHub sources - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Setup JDK ${{ matrix.java }} uses: actions/setup-java@v1 with: From 4469ed79d7c4ac6891bd15147ffcec46f76d3c15 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 15:56:37 -0700 Subject: [PATCH 030/115] Bump org.apache.maven.plugins:maven-compiler-plugin from 3.8.0 to 3.11.0 (#339) Bumps [org.apache.maven.plugins:maven-compiler-plugin](https://github.com/apache/maven-compiler-plugin) from 3.8.0 to 3.11.0. - [Release notes](https://github.com/apache/maven-compiler-plugin/releases) - [Commits](https://github.com/apache/maven-compiler-plugin/compare/maven-compiler-plugin-3.8.0...maven-compiler-plugin-3.11.0) --- updated-dependencies: - dependency-name: org.apache.maven.plugins:maven-compiler-plugin dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 907aa559f2..fe64aa6262 100644 --- a/pom.xml +++ b/pom.xml @@ -361,7 +361,7 @@ org.apache.maven.plugins maven-compiler-plugin - 3.8.0 + 3.11.0 org.apache.maven.plugins From 905bb0c0896d2314876a38afc37ce188f002c170 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 10 Aug 2023 16:02:16 -0700 Subject: [PATCH 031/115] Update codeql.yml --- .github/workflows/codeql.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 29e42d03b9..296e406434 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -9,7 +9,7 @@ # the `language` matrix defined below to confirm you have the correct set of # supported CodeQL languages. # -name: "CodeQL" +name: CodeQL on: push: From 577068533cf0513bc02d6027cda53f7c0773cd1d Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 10 Aug 2023 16:07:27 -0700 Subject: [PATCH 032/115] Specify distribution in setup-java v2 action --- .github/workflows/build.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 353ebfc537..46e477fc5b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -19,8 +19,9 @@ jobs: - name: Checkout GitHub sources uses: actions/checkout@v2 - name: Setup JDK ${{ matrix.java }} - uses: actions/setup-java@v1 + uses: actions/setup-java@v2 with: + distribution: zulu java-version: ${{ matrix.java }} - name: Cache Maven packages uses: actions/cache@v2 From 826297d80b74d60c1b1d6c7028dc5b497809a11e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 17:02:13 -0700 Subject: [PATCH 033/115] Bump actions/setup-java from 1 to 3 (#338) Bumps 
[actions/setup-java](https://github.com/actions/setup-java) from 1 to 3. - [Release notes](https://github.com/actions/setup-java/releases) - [Commits](https://github.com/actions/setup-java/compare/v1...v3) --- updated-dependencies: - dependency-name: actions/setup-java dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2a4dec689f..bed04f579b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -19,7 +19,7 @@ jobs: - name: Checkout GitHub sources uses: actions/checkout@v3 - name: Setup JDK ${{ matrix.java }} - uses: actions/setup-java@v2 + uses: actions/setup-java@v3 with: distribution: zulu java-version: ${{ matrix.java }} From af92eec7ffac911641bf8fdfe5d11c98c797f8db Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 10 Aug 2023 17:11:40 -0700 Subject: [PATCH 034/115] Update codeql.yml --- .github/workflows/codeql.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 296e406434..d0b61a6cd9 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -60,6 +60,15 @@ jobs: # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs # queries: security-extended,security-and-quality + # Cache downloaded Maven dependencies + - name: Cache Maven packages + uses: actions/cache@v3 + with: + path: | + ~/.m2/repository + !~/.m2/repository/io/aklivity/zilla + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2 # Autobuild attempts to build any compiled 
languages (C/C++, C#, Go, Java, or Swift). # If this step fails, then you should remove it and run the build manually (see below) From cdb6cd10e0a4a341a245f8f164c22b28eb5d4a81 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 10 Aug 2023 18:17:06 -0700 Subject: [PATCH 035/115] Specify each different Dockerfile directory for dependency scanning --- .github/dependabot.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 40dbaf2c4a..1eaaf65b02 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,7 +5,11 @@ updates: schedule: interval: daily - package-ecosystem: docker - directory: /cloud/docker-image/src/main/docker + directory: /cloud/docker-image/src/main/docker/release + schedule: + interval: daily +- package-ecosystem: docker + directory: /cloud/docker-image/src/main/docker/incubator schedule: interval: daily - package-ecosystem: github-actions From e988658e45659feaf62cf439078a450282cc84aa Mon Sep 17 00:00:00 2001 From: Attila Kreiner Date: Fri, 11 Aug 2023 23:18:42 +0200 Subject: [PATCH 036/115] Ignore CacheFetchIT.shouldFetchFilterSyncWithData (#351) --- .../runtime/binding/kafka/internal/stream/CacheFetchIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheFetchIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheFetchIT.java index 00c7a9756b..65b164b076 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheFetchIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheFetchIT.java @@ -466,6 +466,7 @@ public void shouldFetchFilterSync() throws Exception } @Test + @Ignore @Configuration("cache.yaml") @Specification({ "${app}/filter.sync.with.data/client", From c8fef34ba6fcf4ffcc712cfe428e28594848ff96 Mon 
Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Aug 2023 14:28:14 -0700 Subject: [PATCH 037/115] Bump io.fabric8:docker-maven-plugin from 0.39.1 to 0.43.2 (#348) Bumps [io.fabric8:docker-maven-plugin](https://github.com/fabric8io/docker-maven-plugin) from 0.39.1 to 0.43.2. - [Release notes](https://github.com/fabric8io/docker-maven-plugin/releases) - [Changelog](https://github.com/fabric8io/docker-maven-plugin/blob/master/doc/changelog.md) - [Commits](https://github.com/fabric8io/docker-maven-plugin/compare/v0.39.1...v0.43.2) --- updated-dependencies: - dependency-name: io.fabric8:docker-maven-plugin dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- cloud/docker-image/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker-image/pom.xml b/cloud/docker-image/pom.xml index 3d0fb42c96..0bf9572bff 100644 --- a/cloud/docker-image/pom.xml +++ b/cloud/docker-image/pom.xml @@ -237,7 +237,7 @@ io.fabric8 docker-maven-plugin - 0.39.1 + 0.43.2 ${*} From a4eac6288160ec53af35b128bf49693d85a03da2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Aug 2023 14:50:36 -0700 Subject: [PATCH 038/115] Bump org.apache.maven.plugin-tools:maven-plugin-annotations (#344) Bumps [org.apache.maven.plugin-tools:maven-plugin-annotations](https://github.com/apache/maven-plugin-tools) from 3.5 to 3.9.0. - [Release notes](https://github.com/apache/maven-plugin-tools/releases) - [Commits](https://github.com/apache/maven-plugin-tools/compare/maven-plugin-tools-3.5...maven-plugin-tools-3.9.0) --- updated-dependencies: - dependency-name: org.apache.maven.plugin-tools:maven-plugin-annotations dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index fe64aa6262..0522a4faab 100644 --- a/pom.xml +++ b/pom.xml @@ -163,7 +163,7 @@ org.apache.maven.plugin-tools maven-plugin-annotations - 3.5 + 3.9.0 org.apache.maven.plugin-testing From 87e6f46a39af92d05b665aac69ad4782aa42c4aa Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Fri, 11 Aug 2023 15:02:53 -0700 Subject: [PATCH 039/115] Metadata for group merged stream (#349) * Metadata for merged stream * Group is not a first class Kafka protocol API --------- Co-authored-by: John Fallows --- .../src/main/resources/META-INF/zilla/kafka.idl | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl index c8a61b2afe..b0f3f76845 100644 --- a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl +++ b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl @@ -166,38 +166,39 @@ scope kafka { enum KafkaApi (uint8) { + GROUP (253), BOOTSTRAP (254), MERGED (255), META (3), DESCRIBE (32), FETCH (1), - PRODUCE (0), - GROUP (10) + PRODUCE (0) } union KafkaBeginEx switch (uint8) extends core::stream::Extension { + case 253: kafka::stream::KafkaGroupBeginEx group; case 254: kafka::stream::KafkaBootstrapBeginEx bootstrap; case 255: kafka::stream::KafkaMergedBeginEx merged; case 3: kafka::stream::KafkaMetaBeginEx meta; case 32: kafka::stream::KafkaDescribeBeginEx describe; case 1: kafka::stream::KafkaFetchBeginEx fetch; case 0: kafka::stream::KafkaProduceBeginEx produce; - case 10: kafka::stream::KafkaGroupBeginEx group; } union KafkaDataEx switch (uint8) extends core::stream::Extension { + case 253: kafka::stream::KafkaGroupDataEx group; case 255: kafka::stream::KafkaMergedDataEx 
merged; case 3: kafka::stream::KafkaMetaDataEx meta; case 32: kafka::stream::KafkaDescribeDataEx describe; case 1: kafka::stream::KafkaFetchDataEx fetch; case 0: kafka::stream::KafkaProduceDataEx produce; - case 10: kafka::stream::KafkaGroupDataEx group; } union KafkaFlushEx switch (uint8) extends core::stream::Extension { + case 253: kafka::stream::KafkaGroupFlushEx group; case 255: kafka::stream::KafkaMergedFlushEx merged; case 1: kafka::stream::KafkaFetchFlushEx fetch; case 0: kafka::stream::KafkaProduceFlushEx produce; @@ -217,6 +218,7 @@ scope kafka { KafkaCapabilities capabilities = PRODUCE_AND_FETCH; string16 topic; + string16 groupId = null; KafkaOffset[] partitions; KafkaFilter[] filters; // ORed KafkaEvaluation evaluation = LAZY; @@ -247,6 +249,11 @@ scope kafka KafkaKey key; } + struct KafkaGroupFlushEx + { + KafkaOffset partition; + } + struct KafkaMetaBeginEx { string16 topic; From 5f4982c149db2662f9df090ce4a9fc71201a3805 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Aug 2023 15:03:23 -0700 Subject: [PATCH 040/115] Bump alpine in /cloud/docker-image/src/main/docker/release (#343) Bumps alpine from 3.18.2 to 3.18.3. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- cloud/docker-image/src/main/docker/release/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker-image/src/main/docker/release/Dockerfile b/cloud/docker-image/src/main/docker/release/Dockerfile index e5afbea60f..905f4f53b8 100644 --- a/cloud/docker-image/src/main/docker/release/Dockerfile +++ b/cloud/docker-image/src/main/docker/release/Dockerfile @@ -27,7 +27,7 @@ RUN apk add --no-cache wget RUN ./zpmw install --debug --exclude-remote-repositories RUN ./zpmw clean --keep-image -FROM alpine:3.18.2 +FROM alpine:3.18.3 COPY --from=build /.zpm /opt/zilla/.zpm COPY --from=build /zilla /opt/zilla/zilla From 2273deb6aac710bf9f1347a378098a317257840c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Aug 2023 15:03:52 -0700 Subject: [PATCH 041/115] Bump alpine in /cloud/docker-image/src/main/docker/incubator (#341) Bumps alpine from 3.18.2 to 3.18.3. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- cloud/docker-image/src/main/docker/incubator/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker-image/src/main/docker/incubator/Dockerfile b/cloud/docker-image/src/main/docker/incubator/Dockerfile index e5afbea60f..905f4f53b8 100644 --- a/cloud/docker-image/src/main/docker/incubator/Dockerfile +++ b/cloud/docker-image/src/main/docker/incubator/Dockerfile @@ -27,7 +27,7 @@ RUN apk add --no-cache wget RUN ./zpmw install --debug --exclude-remote-repositories RUN ./zpmw clean --keep-image -FROM alpine:3.18.2 +FROM alpine:3.18.3 COPY --from=build /.zpm /opt/zilla/.zpm COPY --from=build /zilla /opt/zilla/zilla From f9c33c49593f13dbcb08a42ea8f7b5a7fca7ad6d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Aug 2023 15:04:14 -0700 Subject: [PATCH 042/115] Bump io.kokuwa.maven:helm-maven-plugin from 6.6.0 to 6.10.0 (#345) Bumps [io.kokuwa.maven:helm-maven-plugin](https://github.com/kokuwaio/helm-maven-plugin) from 6.6.0 to 6.10.0. - [Release notes](https://github.com/kokuwaio/helm-maven-plugin/releases) - [Commits](https://github.com/kokuwaio/helm-maven-plugin/compare/6.6.0...6.10.0) --- updated-dependencies: - dependency-name: io.kokuwa.maven:helm-maven-plugin dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- cloud/helm-chart/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/helm-chart/pom.xml b/cloud/helm-chart/pom.xml index 81c4c6b16b..853e06803a 100644 --- a/cloud/helm-chart/pom.xml +++ b/cloud/helm-chart/pom.xml @@ -51,7 +51,7 @@ io.kokuwa.maven helm-maven-plugin - 6.6.0 + 6.10.0 true src/main/helm From 96c2c4ff3081b346b2c3e2b5855d8c13bcd50cfc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Aug 2023 16:58:43 -0700 Subject: [PATCH 043/115] Bump org.sonatype.plexus:plexus-sec-dispatcher from 1.3 to 1.4 (#347) Bumps org.sonatype.plexus:plexus-sec-dispatcher from 1.3 to 1.4. --- updated-dependencies: - dependency-name: org.sonatype.plexus:plexus-sec-dispatcher dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- manager/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manager/pom.xml b/manager/pom.xml index 24a70a709c..bbd7fe34da 100644 --- a/manager/pom.xml +++ b/manager/pom.xml @@ -40,7 +40,7 @@ org.sonatype.plexus plexus-sec-dispatcher - 1.3 + 1.4 org.sonatype.sisu From ecf41cb8f4604b4d685e717e7eafdf2d88c59623 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Fri, 11 Aug 2023 17:25:41 -0700 Subject: [PATCH 044/115] Use mockito version from parent pom --- runtime/command-metrics/pom.xml | 2 -- 1 file changed, 2 deletions(-) diff --git a/runtime/command-metrics/pom.xml b/runtime/command-metrics/pom.xml index c9ff4c0377..ad52828b76 100644 --- a/runtime/command-metrics/pom.xml +++ b/runtime/command-metrics/pom.xml @@ -28,7 +28,6 @@ 11 0.50 2 - 4.9.0 @@ -52,7 +51,6 @@ org.mockito mockito-core - ${mockito.version} test From 6fadd76b51becc7481c6ae8e09ced9c3e36e7eb6 Mon Sep 17 00:00:00 2001 
From: John Fallows Date: Sun, 13 Aug 2023 17:30:52 -0700 Subject: [PATCH 045/115] Include JDK 20 in build matrix (#352) --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index bed04f579b..1f134fc0d8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - java: [ 11, 17 ] + java: [ 11, 17, 20 ] steps: - name: Checkout GitHub sources From 7e6af071abbf178a60b9798aa6ad3f7e26588e89 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Aug 2023 11:56:02 -0700 Subject: [PATCH 046/115] Bump com.squareup:javapoet from 1.9.0 to 1.13.0 (#355) Bumps [com.squareup:javapoet](https://github.com/square/javapoet) from 1.9.0 to 1.13.0. - [Changelog](https://github.com/square/javapoet/blob/master/CHANGELOG.md) - [Commits](https://github.com/square/javapoet/compare/javapoet-1.9.0...javapoet-1.13.0) --- updated-dependencies: - dependency-name: com.squareup:javapoet dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 0522a4faab..16dc0902ca 100644 --- a/pom.xml +++ b/pom.xml @@ -123,7 +123,7 @@ com.squareup javapoet - 1.9.0 + 1.13.0 org.junit.jupiter From fb086b9e303b1c862eab1be36fd32ab7a7840bd9 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Mon, 14 Aug 2023 16:52:44 -0700 Subject: [PATCH 047/115] Ignore broken dependencies --- .github/dependabot.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 1eaaf65b02..f6887e2230 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -2,6 +2,9 @@ version: 2 updates: - package-ecosystem: maven directory: / + ignore: + - dependency-name: "org.kaazing:k3po.*" + versions: [ "4.x", "5.x" ] schedule: interval: daily - package-ecosystem: docker From 750bc6d185118ffedc34316c59c50d0e3b8a089b Mon Sep 17 00:00:00 2001 From: bmaidics Date: Tue, 15 Aug 2023 02:09:44 +0200 Subject: [PATCH 048/115] Request-response mqtt-kafka (#325) --- .../kafka/publish.empty.message/client.rpt | 4 +- .../kafka/publish.empty.message/server.rpt | 4 +- .../publish.multiple.messages/client.rpt | 12 ++-- .../publish.multiple.messages/server.rpt | 12 ++-- .../client.rpt | 10 +-- .../server.rpt | 10 ++- .../kafka/publish.one.message/client.rpt | 10 ++- .../kafka/publish.one.message/server.rpt | 9 ++- .../streams/kafka/publish.retained/client.rpt | 16 ++--- .../streams/kafka/publish.retained/server.rpt | 16 ++--- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../publish.with.user.property/client.rpt | 4 +- .../publish.with.user.property/server.rpt | 4 +- .../client.rpt | 8 +-- .../server.rpt | 8 +-- .../kafka/session.client.takeover/client.rpt | 8 +-- .../kafka/session.client.takeover/server.rpt | 8 +-- .../session.exists.clean.start/client.rpt | 2 
+- .../session.exists.clean.start/server.rpt | 2 +- .../client.rpt | 6 +- .../server.rpt | 6 +- .../kafka/session.subscribe/client.rpt | 6 +- .../kafka/session.subscribe/server.rpt | 6 +- .../client.rpt | 6 +- .../server.rpt | 6 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../subscribe.client.sent.abort/client.rpt | 2 +- .../subscribe.client.sent.abort/server.rpt | 2 +- .../subscribe.client.sent.data/client.rpt | 2 +- .../subscribe.client.sent.data/server.rpt | 2 +- .../subscribe.client.sent.reset/client.rpt | 2 +- .../subscribe.client.sent.reset/server.rpt | 2 +- .../client.rpt | 42 ++++++------- .../server.rpt | 42 ++++++------- .../client.rpt | 36 +++++------ .../server.rpt | 36 +++++------ .../client.rpt | 34 +++++------ .../server.rpt | 34 +++++------ .../subscribe.filter.change.retain/client.rpt | 36 +++++------ .../subscribe.filter.change.retain/server.rpt | 36 +++++------ .../subscribe.multiple.message/client.rpt | 6 +- .../subscribe.multiple.message/server.rpt | 10 +-- .../client.rpt | 6 +- .../server.rpt | 6 +- .../client.rpt | 6 +- .../server.rpt | 6 +- .../client.rpt | 6 +- .../server.rpt | 6 +- .../kafka/subscribe.one.message/client.rpt | 6 +- .../kafka/subscribe.one.message/server.rpt | 6 +- .../subscribe.publish.no.local/client.rpt | 10 +-- .../subscribe.publish.no.local/server.rpt | 10 +-- .../client.rpt | 12 ++-- .../server.rpt | 12 ++-- .../client.rpt | 6 +- .../server.rpt | 6 +- .../streams/kafka/subscribe.retain/client.rpt | 12 ++-- .../streams/kafka/subscribe.retain/server.rpt | 12 ++-- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../subscribe.server.sent.abort/client.rpt | 2 +- .../subscribe.server.sent.abort/server.rpt | 2 +- .../subscribe.server.sent.flush/client.rpt | 2 +- .../subscribe.server.sent.flush/server.rpt | 2 +- .../subscribe.server.sent.reset/client.rpt | 2 +- .../subscribe.server.sent.reset/server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- 
.../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 6 +- .../server.rpt | 6 +- .../client.rpt | 6 +- .../server.rpt | 6 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../unsubscribe.after.subscribe/client.rpt | 2 +- .../unsubscribe.after.subscribe/server.rpt | 2 +- .../client.rpt | 6 +- .../server.rpt | 6 +- .../config/MqttKafkaHeaderHelper.java | 24 ++++++-- .../stream/MqttKafkaPublishFactory.java | 61 ++++++++++++++++++- .../stream/MqttKafkaSubscribeFactory.java | 4 +- 97 files changed, 467 insertions(+), 381 deletions(-) diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt index 6bb9ec3c76..16715f6575 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt @@ -35,8 +35,8 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt index 8aedc49335..79343cc379 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt @@ -38,8 +38,8 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt index 8a506a658d..997314b9e1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt @@ -35,8 +35,8 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -51,8 +51,8 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -68,8 +68,8 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", 
"sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt index 75a80dc739..ebd271846d 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt @@ -38,8 +38,8 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -53,8 +53,8 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -68,8 +68,8 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt index 7ce14030c9..b07afe10ea 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt @@ -35,15 +35,17 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .headerInt("zilla:timeout-ms", 15000) .header("zilla:content-type", "message") .header("zilla:format", "TEXT") - .header("zilla:reply-to", "sensor/one") + .header("zilla:reply-to", "messages") + .header("zilla:reply-key", "sensor/one") + .header("zilla:reply-filter", "sensor") + .header("zilla:reply-filter", "one") .header("zilla:correlation-id", "info") .build() .build()} - write "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt index 3d12d7a752..086835619b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt @@ -38,15 +38,19 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) 
.key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .headerInt("zilla:timeout-ms", 15000) .header("zilla:content-type", "message") .header("zilla:format", "TEXT") - .header("zilla:reply-to", "sensor/one") + .header("zilla:reply-to", "messages") + .header("zilla:reply-key", "sensor/one") + .header("zilla:reply-filter", "sensor") + .header("zilla:reply-filter", "one") .header("zilla:correlation-id", "info") .build() .build()} + read "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt index b9d3d28ab1..bdea0ddfcb 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt @@ -35,15 +35,19 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .headerInt("zilla:timeout-ms", 15000) .header("zilla:content-type", "message") .header("zilla:format", "TEXT") - .header("zilla:reply-to", "sensor/one") + .header("zilla:reply-to", "mqtt_messages") + .header("zilla:reply-key", "sensor/one") + .header("zilla:reply-filter", "sensor") + .header("zilla:reply-filter", "one") .header("zilla:correlation-id", "info") .build() .build()} + write "message" diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt index e598440227..ebbdf81dd1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt @@ -38,13 +38,16 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .headerInt("zilla:timeout-ms", 15000) .header("zilla:content-type", "message") .header("zilla:format", "TEXT") - .header("zilla:reply-to", "sensor/one") + .header("zilla:reply-to", "mqtt_messages") + .header("zilla:reply-key", "sensor/one") + .header("zilla:reply-filter", "sensor") + .header("zilla:reply-filter", "one") .header("zilla:correlation-id", "info") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt index 11f5ae9078..418d9c94af 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt @@ -35,8 +35,8 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - 
.header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -50,8 +50,8 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -65,8 +65,8 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -101,8 +101,8 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt index 700f0de29a..da7e2a6718 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt @@ -38,8 +38,8 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + 
.header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -53,8 +53,8 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -68,8 +68,8 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -101,8 +101,8 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt index a9a56b7bed..be93f2900d 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt @@ -35,8 +35,8 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensors/1") - .header("zilla:topic", "sensors") - .header("zilla:topic", "1") + .header("zilla:filter", "sensors") + .header("zilla:filter", 
"1") .header("zilla:local", "755452d5-e2ef-4113-b9c6-2f53de96fd76") .header("zilla:format", "TEXT") .header("row1", "1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt index f822453ebe..6044b6f9ed 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt @@ -38,8 +38,8 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("sensors/1") - .header("zilla:topic", "sensors") - .header("zilla:topic", "1") + .header("zilla:filter", "sensors") + .header("zilla:filter", "1") .header("zilla:local", "755452d5-e2ef-4113-b9c6-2f53de96fd76") .header("zilla:format", "TEXT") .header("row1", "1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt index 900025fe31..f4b9cb78c3 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt @@ -35,8 +35,8 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensors/1") - .header("zilla:topic", 
"sensors") - .header("zilla:topic", "1") + .header("zilla:filter", "sensors") + .header("zilla:filter", "1") .header("zilla:local", "755452d5-e2ef-4113-b9c6-2f53de96fd76") .header("zilla:format", "TEXT") .header("row1", "1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt index 3a5c073c81..c126ded380 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt @@ -38,8 +38,8 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("sensors/1") - .header("zilla:topic", "sensors") - .header("zilla:topic", "1") + .header("zilla:filter", "sensors") + .header("zilla:filter", "1") .header("zilla:local", "755452d5-e2ef-4113-b9c6-2f53de96fd76") .header("zilla:format", "TEXT") .header("row1", "1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt index 1b33c42dc8..96824eac7c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt @@ -35,8 +35,8 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) 
.partition(-1, -1) .key("sensors/1") - .header("zilla:topic", "sensors") - .header("zilla:topic", "1") + .header("zilla:filter", "sensors") + .header("zilla:filter", "1") .header("zilla:local", "755452d5-e2ef-4113-b9c6-2f53de96fd76") .header("zilla:format", "TEXT") .header("row", "1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt index 7dccd60e84..eb22fd5484 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt @@ -38,8 +38,8 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("sensors/1") - .header("zilla:topic", "sensors") - .header("zilla:topic", "1") + .header("zilla:filter", "sensors") + .header("zilla:filter", "1") .header("zilla:local", "755452d5-e2ef-4113-b9c6-2f53de96fd76") .header("zilla:format", "TEXT") .header("row", "1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt index 27d934b82a..cc1a004b2b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt @@ 
-143,7 +143,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -268,7 +268,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -287,8 +287,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt index b99284804c..5023162a29 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt @@ -133,7 +133,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -242,7 +242,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -262,8 +262,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 
1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt index ed7bcbf4e0..72f713d0a6 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt @@ -179,7 +179,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -331,7 +331,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -350,8 +350,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client-1") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt index 0a31ece87b..16e4a117a9 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt @@ -175,7 +175,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -319,7 +319,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -339,8 +339,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client-1") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt index b728334995..d7238700bd 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt @@ -180,7 +180,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt index 0e677249de..12fd62832f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt @@ -178,7 +178,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt index ca39c0c712..0a954ff579 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt @@ -122,7 +122,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -141,8 +141,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + 
.header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt index 9184a85857..9db846d27a 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt @@ -112,7 +112,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -132,8 +132,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt index 2f7bc1b5ce..cec9c6a266 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt @@ -138,7 +138,7 @@ write zilla:begin.ext 
${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -157,8 +157,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt index 905ba443e3..eddaecfc04 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt @@ -130,7 +130,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -150,8 +150,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt index 
4eb0d2067f..b5ef64ebdc 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt @@ -160,7 +160,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -179,8 +179,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt index 2e1992a80d..c1e0f2eddb 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt @@ -149,7 +149,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -169,8 +169,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + 
.header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt index 9ebb076924..de5e39f5b4 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt @@ -148,7 +148,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt index 7c96e0c420..2b4dc1b281 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt @@ -137,7 +137,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt index b0f9ea4a60..07f86fd157 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt index 53ea0af8be..a9cdce7773 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt index 554ae22210..c7f6fb0831 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt index fba059b653..3a4a22dca7 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt index 0e6fba6d82..0fedcb6313 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt index 57b89768d0..57555a69f5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt index bab0d0afc7..23201284d5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -44,8 +44,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -61,8 +61,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -75,13 +75,13 @@ write advise zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -94,19 +94,19 @@ write advise zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("three") .build() @@ -124,8 +124,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + 
.header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -146,7 +146,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -165,8 +165,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -179,13 +179,13 @@ write advise zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("three") .build() @@ -203,8 +203,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -220,8 +220,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/three") - .header("zilla:topic", "sensor") - .header("zilla:topic", "three") + .header("zilla:filter", "sensor") + .header("zilla:filter", "three") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt index b1ded1a193..817d4cfab2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -45,8 +45,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -64,8 +64,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -79,13 +79,13 @@ read advised zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -99,19 +99,19 @@ read advised zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") 
.sequence("sensor") .sequence("two") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("three") .build() @@ -130,8 +130,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -148,7 +148,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -168,8 +168,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -185,13 +185,13 @@ read advised zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("three") .build() @@ -209,8 +209,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -228,8 +228,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/three") - .header("zilla:topic", "sensor") - .header("zilla:topic", "three") + .header("zilla:filter", "sensor") + .header("zilla:filter", "three") .header("zilla:local", "client") .header("zilla:format", "TEXT") 
.build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt index 0a11769047..b955b6f287 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -44,8 +44,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -61,8 +61,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -75,13 +75,13 @@ write advise zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -97,8 +97,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") 
- .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -114,8 +114,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -133,8 +133,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -155,7 +155,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -174,8 +174,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -191,8 +191,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt index 3bc279f7f4..5640fc9783 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -45,8 +45,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -64,8 +64,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -79,13 +79,13 @@ read advised zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -104,8 +104,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") 
.header("zilla:format", "TEXT") .build() @@ -123,8 +123,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -143,8 +143,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -161,7 +161,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -183,8 +183,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -202,8 +202,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt index d2a71faf71..526b42394f 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -42,8 +42,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -69,13 +69,13 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -96,8 +96,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -110,7 +110,7 @@ write advise zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -126,8 +126,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") 
.header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -140,13 +140,13 @@ write advise zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -164,8 +164,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -185,7 +185,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -204,8 +204,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt index 8d43da6ad9..f1faf09a87 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt @@ -25,7 +25,7 @@ 
read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -45,8 +45,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -68,13 +68,13 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -94,8 +94,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -109,7 +109,7 @@ read advised zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -127,8 +127,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -142,13 +142,13 @@ read advised zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") 
.sequence("sensor") .sequence("two") .build() @@ -166,8 +166,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -185,7 +185,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -205,8 +205,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt index 92859cd311..8e124ff4a4 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -44,8 +44,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", 
"sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -61,8 +61,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -75,13 +75,13 @@ write advise zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -99,8 +99,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -116,8 +116,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -133,8 +133,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -153,7 +153,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -172,8 +172,8 @@ read zilla:data.ext 
${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -189,8 +189,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt index b2f8912a8d..09a1c02153 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -45,8 +45,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -64,8 +64,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + 
.header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -79,13 +79,13 @@ read advised zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -104,8 +104,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -123,8 +123,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -142,8 +142,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -160,7 +160,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -180,8 +180,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -199,8 +199,8 @@ write 
zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/two") - .header("zilla:topic", "sensor") - .header("zilla:topic", "two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt index b865d030ac..a9be513247 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -42,8 +42,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt index 76dac38228..7ec7c0a1e1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -45,8 +45,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -64,8 +64,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt index 03e7f9dbbf..c44e724db0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -42,8 +42,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) 
.progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt index 5d1ee60fce..53cd0c9dfa 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -45,8 +45,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt index c952290dd9..a57afd171b 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -42,8 +42,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .headerInt("zilla:timeout-ms", 15000) .header("zilla:content-type", "message") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt index 710ae2fb00..2ab07d5427 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -45,8 +45,8 @@ 
write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .headerInt("zilla:timeout-ms", 15000) .header("zilla:content-type", "message") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt index 4eb92d03a2..2eae72f9d6 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -42,8 +42,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .header("row1", "1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt index e9bede539e..6cfe4c4e2e 
100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -45,8 +45,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .header("row1", "1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt index b865d030ac..a9be513247 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -42,8 +42,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") 
.header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt index ec9540c790..d309133aad 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -45,8 +45,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt index d1e22a07b8..22bcf36bbb 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") 
.filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -44,8 +44,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client2") .header("zilla:format", "TEXT") .build() @@ -76,8 +76,8 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt index 21bce7ec08..6d725ff94c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -46,8 +46,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client2") .header("zilla:format", "TEXT") .build() @@ -79,8 +79,8 @@ read zilla:data.ext ${kafka:matchDataEx() 
.deferred(0) .partition(-1, -1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt index a99b18fde2..d5318f36d2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .skipMany() @@ -40,14 +40,14 @@ write advise zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .skipMany() .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .sequence(1) @@ -65,9 +65,9 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one/1") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") - .header("zilla:topic", "1") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:filter", "1") .header("zilla:local", "client2") .header("zilla:format", "TEXT") .build() diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt index d1e3e9dcf2..cfbf2e195c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .skipMany() @@ -42,14 +42,14 @@ read advised zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .skipMany() .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .sequence(1) @@ -68,9 +68,9 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one/1") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") - .header("zilla:topic", "1") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:filter", "1") .header("zilla:local", "client2") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt index 2280a88751..fda180c3ea 
100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .build() @@ -42,8 +42,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt index 4b0ddeaf67..ccd7462d68 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .build() @@ -46,8 +46,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") 
.header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt index d58a20ae17..dcd25c5048 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -43,8 +43,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -70,7 +70,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -89,8 +89,8 @@ read zilla:data.ext ${kafka:matchDataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt index e11b2bc16a..e2b4ff4d5d 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -45,8 +45,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() @@ -68,7 +68,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -88,8 +88,8 @@ write zilla:data.ext ${kafka:dataEx() .progress(0, 2) .progress(1, 1) .key("sensor/one") - .header("zilla:topic", "sensor") - .header("zilla:topic", "one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") .header("zilla:local", "client") .header("zilla:format", "TEXT") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt index 3634d3a018..cb9063ac25 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -49,7 +49,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt index 5029a11921..fa1dba8698 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -46,7 +46,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt index a78a268f35..dba69761a5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt index 3ee4685f5d..14b873eb07 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_retained") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt index b7c2947199..4449ffa1e5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt index f750ba6e33..dcf3c7e1b8 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt index 7e23945e68..80622344b5 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt index 769380e04a..ea8220fb1d 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt index aa1cceafcb..0a278837fe 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt index 7a0609532c..b7fae7c2b1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt index eb9e9456b6..64893d8fa8 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext 
${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skipMany() .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt index 3f731f0f29..e99cd37353 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skipMany() .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt index 5678d76dbd..c7839119ce 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - 
.headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .sequence(1) diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt index 2b497d8eb0..d6fc5bac27 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .sequence(1) diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt index 837a263e35..d1e5384a94 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) 
.build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt index 79017a9ae0..b196c48821 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt index cfd88f651b..2a8e12eeb4 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .skip(1) diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt index 1861b76c4b..d79e051bb1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .skip(1) diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt index 71623130fd..b388d28b7e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt @@ -23,13 +23,13 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") 
.build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt index ee7855019f..62719bea04 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt @@ -25,13 +25,13 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt index ce061a5158..72d62a020f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt @@ -23,13 +23,13 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + 
.headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("device") .skipMany() .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt index a59d16c0ff..bc2bf474bf 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt @@ -25,13 +25,13 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("device") .skipMany() .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt index 961aa5b7da..98be76b333 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt @@ 
-23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -40,13 +40,13 @@ write advise zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt index a09ec9ad48..c102d10f74 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -41,13 +41,13 @@ read advised zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt index 39be054da7..848af3483f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -39,13 +39,13 @@ write advise zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("device") .skipMany() .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt index b9347e2956..0222a9b864 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - 
.headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() @@ -41,13 +41,13 @@ read advised zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("device") .skipMany() .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt index 7d46cfe584..9b0fbef39a 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt @@ -23,14 +23,14 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .skipMany() .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .sequence("1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt index 34778e844f..e2358e7b12 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt @@ -25,14 +25,14 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .skipMany() .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .skip(1) .sequence("1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt index 3f1848188a..413d0be841 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt @@ -23,7 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt index d5ff77cc79..3ca0a0c806 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt index d5c1573ee4..b7a444fd76 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt @@ -23,13 +23,13 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -45,7 +45,7 @@ write advise zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt index e9a955fe0b..9482350d03 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt @@ -25,13 +25,13 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("FETCH_ONLY") .topic("mqtt_messages") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("one") .build() .build() .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() @@ -47,7 +47,7 @@ read advised zilla:flush ${kafka:flushEx() .merged() .capabilities("FETCH_ONLY") .filter() - .headers("zilla:topic") + .headers("zilla:filter") .sequence("sensor") .sequence("two") .build() diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java index 48bd6128cf..3883420442 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java @@ -28,7 +28,8 @@ public class MqttKafkaHeaderHelper { - private static final String KAFKA_TOPIC_HEADER_NAME = "zilla:topic"; + private static final String KAFKA_FILTER_HEADER_NAME = "zilla:filter"; + private static final String KAFKA_REPLY_FILTER_HEADER_NAME = "zilla:reply-filter"; private static final String KAFKA_LOCAL_HEADER_NAME = "zilla:local"; @@ 
-39,44 +40,53 @@ public class MqttKafkaHeaderHelper private static final String KAFKA_FORMAT_HEADER_NAME = "zilla:format"; private static final String KAFKA_REPLY_TO_HEADER_NAME = "zilla:reply-to"; + private static final String KAFKA_REPLY_KEY_HEADER_NAME = "zilla:reply-key"; private static final String KAFKA_CORRELATION_ID_HEADER_NAME = "zilla:correlation-id"; - public final OctetsFW kafkaTopicHeaderName; + public final OctetsFW kafkaFilterHeaderName; + public final OctetsFW kafkaReplyFilterHeaderName; public final OctetsFW kafkaLocalHeaderName; public final OctetsFW kafkaTimeoutHeaderName; public final OctetsFW kafkaContentTypeHeaderName; public final OctetsFW kafkaFormatHeaderName; public final OctetsFW kafkaReplyToHeaderName; + public final OctetsFW kafkaReplyKeyHeaderName; public final OctetsFW kafkaCorrelationHeaderName; private final Map> visitors; public final OctetsFW contentTypeRO = new OctetsFW(); public final OctetsFW replyToRO = new OctetsFW(); + public final OctetsFW replyKeyRO = new OctetsFW(); public int timeout; public OctetsFW contentType; public String format; public OctetsFW replyTo; + public OctetsFW replyKey; public OctetsFW correlation; public IntArrayList userPropertiesOffsets; public MqttKafkaHeaderHelper() { - kafkaTopicHeaderName = stringToOctets(KAFKA_TOPIC_HEADER_NAME); + kafkaFilterHeaderName = stringToOctets(KAFKA_FILTER_HEADER_NAME); + kafkaReplyFilterHeaderName = stringToOctets(KAFKA_REPLY_FILTER_HEADER_NAME); kafkaLocalHeaderName = stringToOctets(KAFKA_LOCAL_HEADER_NAME); kafkaTimeoutHeaderName = stringToOctets(KAFKA_TIMEOUT_HEADER_NAME); kafkaContentTypeHeaderName = stringToOctets(KAFKA_CONTENT_TYPE_HEADER_NAME); kafkaFormatHeaderName = stringToOctets(KAFKA_FORMAT_HEADER_NAME); kafkaReplyToHeaderName = stringToOctets(KAFKA_REPLY_TO_HEADER_NAME); + kafkaReplyKeyHeaderName = stringToOctets(KAFKA_REPLY_KEY_HEADER_NAME); kafkaCorrelationHeaderName = stringToOctets(KAFKA_CORRELATION_ID_HEADER_NAME); visitors = new HashMap<>(); - 
visitors.put(kafkaTopicHeaderName, this::skip); + visitors.put(kafkaFilterHeaderName, this::skip); + visitors.put(kafkaReplyFilterHeaderName, this::skip); visitors.put(kafkaLocalHeaderName, this::skip); visitors.put(kafkaTimeoutHeaderName, this::visitTimeout); visitors.put(kafkaContentTypeHeaderName, this::visitContentType); visitors.put(kafkaFormatHeaderName, this::visitFormat); visitors.put(kafkaReplyToHeaderName, this::visitReplyTo); + visitors.put(kafkaReplyKeyHeaderName, this::visitReplyKey); visitors.put(kafkaCorrelationHeaderName, this::visitCorrelationId); } @@ -133,6 +143,12 @@ private void visitReplyTo( replyTo = replyToRO.wrap(value.buffer(), value.offset(), value.limit()); } + private void visitReplyKey( + OctetsFW value) + { + replyKey = replyKeyRO.wrap(value.buffer(), value.offset(), value.limit()); + } + private void visitCorrelationId( OctetsFW value) { diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java index c4e934ecf6..bcba510b97 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java @@ -64,6 +64,7 @@ public class MqttKafkaPublishFactory implements MqttKafkaStreamFactory private static final KafkaAckMode KAFKA_DEFAULT_ACK_MODE = KafkaAckMode.LEADER_ONLY; private static final String MQTT_TYPE_NAME = "mqtt"; private static final String KAFKA_TYPE_NAME = "kafka"; + private static final byte SLASH_BYTE = (byte) '/'; private final OctetsFW emptyRO = new OctetsFW().wrap(new UnsafeBuffer(0L, 0), 0, 0); private final BeginFW beginRO = new BeginFW(); @@ -327,7 +328,7 @@ private void onMqttData( for 
(OctetsFW topicHeader : topicNameHeaders) { - addHeader(helper.kafkaTopicHeaderName, topicHeader); + addHeader(helper.kafkaFilterHeaderName, topicHeader); } addHeader(helper.kafkaLocalHeaderName, clientIdOctets); @@ -357,7 +358,11 @@ private void onMqttData( if (mqttPublishDataEx.responseTopic().asString() != null) { - addHeader(helper.kafkaReplyToHeaderName, mqttPublishDataEx.responseTopic()); + final String16FW responseTopic = mqttPublishDataEx.responseTopic(); + addHeader(helper.kafkaReplyToHeaderName, kafkaMessagesTopic); + addHeader(helper.kafkaReplyKeyHeaderName, responseTopic); + + addFiltersHeader(responseTopic); } if (mqttPublishDataEx.correlation().bytes() != null) @@ -402,6 +407,25 @@ private void onMqttData( } } + private void addFiltersHeader( + String16FW responseTopic) + { + final DirectBuffer responseBuffer = responseTopic.value(); + final int capacity = responseBuffer.capacity(); + + int offset = 0; + int matchAt = 0; + while (offset >= 0 && offset < capacity && matchAt != -1) + { + matchAt = indexOfByte(responseBuffer, offset, capacity, SLASH_BYTE); + if (matchAt != -1) + { + addHeader(helper.kafkaReplyFilterHeaderName, responseBuffer, offset, matchAt - offset); + offset = matchAt + 1; + } + } + addHeader(helper.kafkaReplyFilterHeaderName, responseBuffer, offset, capacity - offset); + } private void onMqttEnd( EndFW end) @@ -625,6 +649,21 @@ private void addHeader( }); } + private void addHeader( + OctetsFW key, + DirectBuffer buffer, + int offset, + int length) + { + kafkaHeadersRW.item(h -> + { + h.nameLen(key.sizeof()); + h.name(key); + h.valueLen(length); + h.value(buffer, offset, length); + }); + } + private void addHeader(String16FW key, String16FW value) { DirectBuffer keyBuffer = key.value(); @@ -638,6 +677,24 @@ private void addHeader(String16FW key, String16FW value) }); } + private static int indexOfByte( + DirectBuffer buffer, + int offset, + int limit, + byte value) + { + int byteAt = -1; + for (int index = offset; index < limit; 
index++) + { + if (buffer.getByte(index) == value) + { + byteAt = index; + break; + } + } + return byteAt; + } + final class KafkaMessagesProxy { diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java index 36bcbfe998..c52e372820 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java @@ -1863,8 +1863,8 @@ private void buildHeaders( String[] headers = pattern.split("/"); conditionBuilder.headers(hb -> { - hb.nameLen(helper.kafkaTopicHeaderName.sizeof()); - hb.name(helper.kafkaTopicHeaderName); + hb.nameLen(helper.kafkaFilterHeaderName.sizeof()); + hb.name(helper.kafkaFilterHeaderName); for (String header : headers) { if (header.equals(MQTT_SINGLE_LEVEL_WILDCARD)) From 1bcf5dd8143c23ae4b54b2baea04a393c49310ce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Aug 2023 18:24:05 -0700 Subject: [PATCH 049/115] Bump org.apache.maven.plugins:maven-jar-plugin from 3.2.0 to 3.3.0 (#357) Bumps [org.apache.maven.plugins:maven-jar-plugin](https://github.com/apache/maven-jar-plugin) from 3.2.0 to 3.3.0. - [Release notes](https://github.com/apache/maven-jar-plugin/releases) - [Commits](https://github.com/apache/maven-jar-plugin/compare/maven-jar-plugin-3.2.0...maven-jar-plugin-3.3.0) --- updated-dependencies: - dependency-name: org.apache.maven.plugins:maven-jar-plugin dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 16dc0902ca..20470b04cc 100644 --- a/pom.xml +++ b/pom.xml @@ -402,7 +402,7 @@ org.apache.maven.plugins maven-jar-plugin - 3.2.0 + 3.3.0 org.apache.maven.plugins From 2143f7d41ee1b1a31ea95a1eec8aa57dea04471f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Aug 2023 08:57:41 -0700 Subject: [PATCH 050/115] Bump eclipse-temurin in /cloud/docker-image/src/main/docker/incubator (#342) Bumps eclipse-temurin from 17-alpine to 20-alpine. --- updated-dependencies: - dependency-name: eclipse-temurin dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- cloud/docker-image/src/main/docker/incubator/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker-image/src/main/docker/incubator/Dockerfile b/cloud/docker-image/src/main/docker/incubator/Dockerfile index 905f4f53b8..78e73cc2dd 100644 --- a/cloud/docker-image/src/main/docker/incubator/Dockerfile +++ b/cloud/docker-image/src/main/docker/incubator/Dockerfile @@ -13,7 +13,7 @@ # specific language governing permissions and limitations under the License. # -FROM eclipse-temurin:17-alpine AS build +FROM eclipse-temurin:20-alpine AS build COPY maven /root/.m2/repository From 78f1accf6db5f8c12a739b990cf1fd0ad22fdafe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Aug 2023 08:57:55 -0700 Subject: [PATCH 051/115] Bump eclipse-temurin in /cloud/docker-image/src/main/docker/release (#340) Bumps eclipse-temurin from 17-alpine to 20-alpine. --- updated-dependencies: - dependency-name: eclipse-temurin dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- cloud/docker-image/src/main/docker/release/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker-image/src/main/docker/release/Dockerfile b/cloud/docker-image/src/main/docker/release/Dockerfile index 905f4f53b8..78e73cc2dd 100644 --- a/cloud/docker-image/src/main/docker/release/Dockerfile +++ b/cloud/docker-image/src/main/docker/release/Dockerfile @@ -13,7 +13,7 @@ # specific language governing permissions and limitations under the License. # -FROM eclipse-temurin:17-alpine AS build +FROM eclipse-temurin:20-alpine AS build COPY maven /root/.m2/repository From 7a88335a066b65e9bad2ff3a778142f1cf51da7b Mon Sep 17 00:00:00 2001 From: John Fallows Date: Tue, 15 Aug 2023 10:06:09 -0700 Subject: [PATCH 052/115] Support dynamic behavior injection in config builder fluent API (#358) --- .../HttpAccessControlConfigBuilder.java | 9 +++++++- .../http/config/HttpAllowConfigBuilder.java | 9 +++++++- .../HttpAuthorizationConfigBuilder.java | 9 +++++++- .../config/HttpConditionConfigBuilder.java | 9 +++++++- .../config/HttpCredentialsConfigBuilder.java | 9 +++++++- .../http/config/HttpExposeConfigBuilder.java | 11 +++++++-- .../http/config/HttpOptionsConfigBuilder.java | 9 +++++++- .../http/config/HttpPatternConfigBuilder.java | 9 +++++++- .../HttpConditionConfigAdapterTest.java | 2 ++ .../config/HttpOptionsConfigAdapterTest.java | 7 ++++++ .../tcp/config/TcpConditionConfigBuilder.java | 9 +++++++- .../tcp/config/TcpOptionsConfigBuilder.java | 9 +++++++- .../config/TcpConditionConfigAdapterTest.java | 2 ++ .../config/TcpOptionsConfigAdapterTest.java | 3 +++ .../tls/config/TlsConditionConfigBuilder.java | 9 +++++++- .../tls/config/TlsOptionsConfigBuilder.java | 9 +++++++- .../config/TlsConditionConfigAdapterTest.java | 2 ++ .../config/TlsOptionsConfigAdapterTest.java | 9 ++++++++ .../internal/airline/ZillaStartCommand.java | 1 + 
.../engine/config/AttributeConfigBuilder.java | 9 +++++++- .../engine/config/BindingConfigBuilder.java | 11 +++++++-- .../runtime/engine/config/ConfigBuilder.java | 14 +++++++++-- .../engine/config/ExporterConfigBuilder.java | 11 +++++++-- .../engine/config/GuardConfigBuilder.java | 11 +++++++-- .../engine/config/GuardedConfigBuilder.java | 9 +++++++- .../engine/config/MetricConfigBuilder.java | 9 +++++++- .../engine/config/MetricRefConfigBuilder.java | 9 +++++++- .../engine/config/NamespaceConfigBuilder.java | 9 +++++++- .../config/NamespaceRefConfigBuilder.java | 9 +++++++- .../engine/config/RouteConfigBuilder.java | 13 ++++++++--- .../engine/config/TelemetryConfigBuilder.java | 9 +++++++- .../config/TelemetryRefConfigBuilder.java | 9 +++++++- .../engine/config/VaultConfigBuilder.java | 11 +++++++-- .../engine/config/ConfigWriterTest.java | 5 ++++ .../config/BindingConfigsAdapterTest.java | 6 +++++ .../config/ConditionConfigAdapterTest.java | 9 +++++++- .../config/NamespaceConfigAdapterTest.java | 6 +++++ .../config/NamespaceRefConfigAdapterTest.java | 2 ++ .../config/RouteConfigAdapterTest.java | 5 ++++ .../config/TelemetryConfigsAdapterTest.java | 10 ++++++++ .../TestBindingOptionsConfigBuilder.java | 9 +++++++- .../TestExporterOptionsConfigAdapter.java | 5 +++- .../TestExporterOptionsConfigBuilder.java | 9 +++++++- .../config/TestGuardOptionsConfigBuilder.java | 9 +++++++- .../config/TestVaultOptionsConfigBuilder.java | 9 +++++++- .../guard/jwt/config/JwtKeyConfigBuilder.java | 9 +++++++- .../jwt/config/JwtOptionsConfigBuilder.java | 9 +++++++- .../jwt/internal/JwtGuardHandlerTest.java | 20 ++++++++++++++++ .../guard/jwt/internal/JwtGuardTest.java | 23 +++++++++++++++++++ .../FileSystemOptionsConfigBuilder.java | 9 +++++++- .../config/FileSystemStoreConfigBuilder.java | 9 +++++++- .../FileSystemOptionsConfigAdapterTest.java | 3 +++ 52 files changed, 401 insertions(+), 44 deletions(-) diff --git 
a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfigBuilder.java index 153e964d2b..dc3970344c 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfigBuilder.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAccessControlConfigBuilder.java @@ -20,7 +20,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public final class HttpAccessControlConfigBuilder implements ConfigBuilder +public final class HttpAccessControlConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -35,6 +35,13 @@ public final class HttpAccessControlConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + public Class> thisType() + { + return (Class>) getClass(); + } + public HttpAccessControlConfigBuilder policy( HttpPolicyConfig policy) { diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAllowConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAllowConfigBuilder.java index 126f246b31..50e6d6fd4c 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAllowConfigBuilder.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAllowConfigBuilder.java @@ -21,7 +21,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public final class HttpAllowConfigBuilder implements ConfigBuilder +public final class HttpAllowConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -36,6 +36,13 @@ public final class HttpAllowConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected 
Class> thisType() + { + return (Class>) getClass(); + } + public HttpAllowConfigBuilder origin( String origin) { diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfigBuilder.java index d27ff19cb5..5f16beb53b 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfigBuilder.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpAuthorizationConfigBuilder.java @@ -19,7 +19,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public final class HttpAuthorizationConfigBuilder implements ConfigBuilder +public final class HttpAuthorizationConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -32,6 +32,13 @@ public final class HttpAuthorizationConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public HttpAuthorizationConfigBuilder name( String name) { diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfigBuilder.java index 45745fbe98..fb5d90a04b 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfigBuilder.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpConditionConfigBuilder.java @@ -22,7 +22,7 @@ import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public final class HttpConditionConfigBuilder implements ConfigBuilder +public final class HttpConditionConfigBuilder extends ConfigBuilder> 
{ private final Function mapper; @@ -34,6 +34,13 @@ public final class HttpConditionConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public HttpConditionConfigBuilder header( String name, String value) diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpCredentialsConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpCredentialsConfigBuilder.java index 28b0c3f0e6..1f3aa54517 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpCredentialsConfigBuilder.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpCredentialsConfigBuilder.java @@ -21,7 +21,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public final class HttpCredentialsConfigBuilder implements ConfigBuilder +public final class HttpCredentialsConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -35,6 +35,13 @@ public final class HttpCredentialsConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public HttpPatternConfigBuilder> header() { return new HttpPatternConfigBuilder<>(this::header); diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpExposeConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpExposeConfigBuilder.java index 2734a3ae1a..9c393f2e8b 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpExposeConfigBuilder.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpExposeConfigBuilder.java @@ -21,18 +21,25 @@ import 
io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public final class HttpExposeConfigBuilder implements ConfigBuilder +public final class HttpExposeConfigBuilder extends ConfigBuilder> { private final Function mapper; private Set headers; - public HttpExposeConfigBuilder( + HttpExposeConfigBuilder( Function mapper) { this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public HttpExposeConfigBuilder header( String header) { diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfigBuilder.java index 23dec3727a..21c3db207e 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfigBuilder.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpOptionsConfigBuilder.java @@ -26,7 +26,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; -public final class HttpOptionsConfigBuilder implements ConfigBuilder +public final class HttpOptionsConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -41,6 +41,13 @@ public final class HttpOptionsConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public HttpOptionsConfigBuilder version( HttpVersion version) { diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPatternConfigBuilder.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPatternConfigBuilder.java index 2cc4485696..5f5dda32e9 100644 --- 
a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPatternConfigBuilder.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/config/HttpPatternConfigBuilder.java @@ -19,7 +19,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public final class HttpPatternConfigBuilder implements ConfigBuilder +public final class HttpPatternConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -32,6 +32,13 @@ public final class HttpPatternConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public HttpPatternConfigBuilder name( String name) { diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapterTest.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapterTest.java index fc30e64ad1..4be0bc20a7 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapterTest.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpConditionConfigAdapterTest.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.binding.http.internal.config; import static java.util.Collections.singletonMap; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -63,6 +64,7 @@ public void shouldReadCondition() public void shouldWriteCondition() { HttpConditionConfig condition = HttpConditionConfig.builder() + .inject(identity()) .header(":authority", "example.net:443") .build(); diff --git 
a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java index c96a0e45da..a9c8c60e6c 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpOptionsConfigAdapterTest.java @@ -18,6 +18,7 @@ import static io.aklivity.zilla.runtime.binding.http.config.HttpPolicyConfig.CROSS_ORIGIN; import static java.util.Collections.singleton; import static java.util.Collections.singletonMap; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -125,12 +126,15 @@ public void shouldReadOptions() public void shouldWriteOptions() { HttpOptionsConfig options = HttpOptionsConfig.builder() + .inject(identity()) .version(HttpVersion.HTTP_1_1) .version(HttpVersion.HTTP_2) .override(new String8FW(":authority"), new String16FW("example.com:443")) .access() + .inject(identity()) .policy(CROSS_ORIGIN) .allow() + .inject(identity()) .origin("https://example.com:9090") .method("DELETE") .header("x-api-key") @@ -138,13 +142,16 @@ public void shouldWriteOptions() .build() .maxAge(Duration.ofSeconds(10)) .expose() + .inject(identity()) .header("x-custom-header") .build() .build() .authorization() .name("test0") .credentials() + .inject(identity()) .header() + .inject(identity()) .name("authorization") .pattern("Bearer {credentials}") .build() diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfigBuilder.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfigBuilder.java index 
d1dd01bdb8..a597932f67 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfigBuilder.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpConditionConfigBuilder.java @@ -20,7 +20,7 @@ import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public final class TcpConditionConfigBuilder implements ConfigBuilder +public final class TcpConditionConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -34,6 +34,13 @@ public final class TcpConditionConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public TcpConditionConfigBuilder cidr( String cidr) { diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfigBuilder.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfigBuilder.java index fedbda81f3..7a858222e0 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfigBuilder.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/config/TcpOptionsConfigBuilder.java @@ -20,7 +20,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; -public final class TcpOptionsConfigBuilder implements ConfigBuilder +public final class TcpOptionsConfigBuilder extends ConfigBuilder> { public static final int BACKLOG_DEFAULT = 0; public static final boolean NODELAY_DEFAULT = true; @@ -40,6 +40,13 @@ public final class TcpOptionsConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public TcpOptionsConfigBuilder host( 
String host) { diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapterTest.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapterTest.java index 75f25e5f29..8a095f9371 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapterTest.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpConditionConfigAdapterTest.java @@ -15,6 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.tcp.internal.config; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -65,6 +66,7 @@ public void shouldReadCondition() public void shouldWriteCondition() { TcpConditionConfig condition = TcpConditionConfig.builder() + .inject(identity()) .cidr("127.0.0.0/24") .authority("*.example.net") .ports(new int[] { 8080 }) diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapterTest.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapterTest.java index 47d6b16d3f..7a0e9db8cf 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapterTest.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/config/TcpOptionsConfigAdapterTest.java @@ -15,6 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.tcp.internal.config; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -100,6 +101,7 @@ public void shouldReadOptionsWithPortRangeSingleton() public 
void shouldWriteOptions() { TcpOptionsConfig options = TcpOptionsConfig.builder() + .inject(identity()) .host("localhost") .ports(new int[] { 8080 }) .build(); @@ -134,6 +136,7 @@ public void shouldReadOptionsWithBacklog() public void shouldWriteOptionsWithBacklog() { TcpOptionsConfig options = TcpOptionsConfig.builder() + .inject(identity()) .host("localhost") .ports(new int[] { 8080 }) .backlog(1000) diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfigBuilder.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfigBuilder.java index 4d6ee3820b..e1f6aa52f5 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfigBuilder.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsConditionConfigBuilder.java @@ -20,7 +20,7 @@ import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public final class TlsConditionConfigBuilder implements ConfigBuilder +public final class TlsConditionConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -33,6 +33,13 @@ public final class TlsConditionConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public TlsConditionConfigBuilder authority( String authority) { diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfigBuilder.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfigBuilder.java index 498d59321a..aaba789b99 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfigBuilder.java +++ 
b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/config/TlsOptionsConfigBuilder.java @@ -23,7 +23,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; -public final class TlsOptionsConfigBuilder implements ConfigBuilder +public final class TlsOptionsConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -42,6 +42,13 @@ public final class TlsOptionsConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public TlsOptionsConfigBuilder version( String version) { diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapterTest.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapterTest.java index 537883ad09..a53a9b2a37 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapterTest.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsConditionConfigAdapterTest.java @@ -15,6 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.tls.internal.config; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -61,6 +62,7 @@ public void shouldReadCondition() public void shouldWriteCondition() { TlsConditionConfig condition = TlsConditionConfig.builder() + .inject(identity()) .authority("example.net") .alpn("echo") .build(); diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapterTest.java 
b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapterTest.java index 4f88b714e5..832cd391b4 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapterTest.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsOptionsConfigAdapterTest.java @@ -17,6 +17,7 @@ import static io.aklivity.zilla.runtime.binding.tls.config.TlsMutualConfig.REQUESTED; import static java.util.Arrays.asList; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -61,6 +62,7 @@ public void shouldReadOptions() public void shouldWriteOptions() { TlsOptionsConfig options = TlsOptionsConfig.builder() + .inject(identity()) .version("TLSv1.2") .build(); @@ -88,6 +90,7 @@ public void shouldReadOptionsWithKeys() public void shouldWriteOptionsWithKeys() { TlsOptionsConfig options = TlsOptionsConfig.builder() + .inject(identity()) .keys(asList("localhost")) .build(); @@ -115,6 +118,7 @@ public void shouldReadOptionsWithTrust() public void shouldWriteOptionsWithTrust() { TlsOptionsConfig options = TlsOptionsConfig.builder() + .inject(identity()) .trust(asList("serverca")) .build(); @@ -142,6 +146,7 @@ public void shouldReadOptionsWithTrustcacerts() public void shouldWriteOptionsWithTrustcacerts() { TlsOptionsConfig options = TlsOptionsConfig.builder() + .inject(identity()) .trustcacerts(true) .build(); @@ -169,6 +174,7 @@ public void shouldReadOptionsWithServerName() public void shouldWriteOptionsWithServerName() { TlsOptionsConfig options = TlsOptionsConfig.builder() + .inject(identity()) .sni(asList("example.net")) .build(); @@ -196,6 +202,7 @@ public void shouldReadOptionsWithAlpn() public void shouldWriteOptionsWithAlpn() { TlsOptionsConfig options = TlsOptionsConfig.builder() + 
.inject(identity()) .alpn(asList("echo")) .build(); @@ -223,6 +230,7 @@ public void shouldReadOptionsWithMutual() public void shouldWriteOptionsWithMutual() { TlsOptionsConfig options = TlsOptionsConfig.builder() + .inject(identity()) .mutual(REQUESTED) .build(); @@ -250,6 +258,7 @@ public void shouldReadOptionsWithSigners() public void shouldWriteOptionsWithSigners() { TlsOptionsConfig options = TlsOptionsConfig.builder() + .inject(identity()) .signers(asList("clientca")) .build(); diff --git a/runtime/command-start/src/main/java/io/aklivity/zilla/runtime/command/start/internal/airline/ZillaStartCommand.java b/runtime/command-start/src/main/java/io/aklivity/zilla/runtime/command/start/internal/airline/ZillaStartCommand.java index 9a56e2e41d..1cbb02786c 100644 --- a/runtime/command-start/src/main/java/io/aklivity/zilla/runtime/command/start/internal/airline/ZillaStartCommand.java +++ b/runtime/command-start/src/main/java/io/aklivity/zilla/runtime/command/start/internal/airline/ZillaStartCommand.java @@ -176,6 +176,7 @@ public void run() catch (Throwable ex) { System.out.println("error"); + onError.onError(ex); rethrowUnchecked(ex); } } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/AttributeConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/AttributeConfigBuilder.java index 0eea7cd59c..9cd42de88f 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/AttributeConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/AttributeConfigBuilder.java @@ -17,7 +17,7 @@ import java.util.function.Function; -public final class AttributeConfigBuilder implements ConfigBuilder +public final class AttributeConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -30,6 +30,13 @@ public final class AttributeConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> 
thisType() + { + return (Class>) getClass(); + } + public AttributeConfigBuilder name( String name) { diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java index 00a7d7bdfd..43a56118ef 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java @@ -22,7 +22,7 @@ import java.util.Optional; import java.util.function.Function; -public final class BindingConfigBuilder implements ConfigBuilder +public final class BindingConfigBuilder extends ConfigBuilder> { public static final List ROUTES_DEFAULT = emptyList(); @@ -43,6 +43,13 @@ public final class BindingConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public BindingConfigBuilder vault( String vault) { @@ -78,7 +85,7 @@ public BindingConfigBuilder entry( return this; } - public >> C options( + public , C>> C options( Function>, C> options) { return options.apply(this::options); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigBuilder.java index 12491b5d63..7354593168 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigBuilder.java @@ -15,7 +15,17 @@ */ package io.aklivity.zilla.runtime.engine.config; -public interface ConfigBuilder +import java.util.function.Function; + +public abstract class ConfigBuilder> { - T build(); + protected abstract Class thisType(); + + public final R inject( + Function visitor) + { + return 
visitor.apply(thisType().cast(this)); + } + + public abstract T build(); } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ExporterConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ExporterConfigBuilder.java index eeb3bf5503..1eb6db05b5 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ExporterConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ExporterConfigBuilder.java @@ -17,7 +17,7 @@ import java.util.function.Function; -public final class ExporterConfigBuilder implements ConfigBuilder +public final class ExporterConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -31,6 +31,13 @@ public final class ExporterConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public ExporterConfigBuilder name( String name) { @@ -45,7 +52,7 @@ public ExporterConfigBuilder type( return this; } - public >> C options( + public , C>> C options( Function>, C> options) { return options.apply(this::options); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardConfigBuilder.java index ff1e64dea1..9b441e35b1 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardConfigBuilder.java @@ -17,7 +17,7 @@ import java.util.function.Function; -public final class GuardConfigBuilder implements ConfigBuilder +public final class GuardConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -31,6 +31,13 @@ public final class GuardConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + 
protected Class> thisType() + { + return (Class>) getClass(); + } + public GuardConfigBuilder name( String name) { @@ -45,7 +52,7 @@ public GuardConfigBuilder type( return this; } - public >> C options( + public , C>> C options( Function>, C> options) { return options.apply(this::options); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardedConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardedConfigBuilder.java index bccffad9ec..66213bfc09 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardedConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/GuardedConfigBuilder.java @@ -22,7 +22,7 @@ import java.util.Optional; import java.util.function.Function; -public final class GuardedConfigBuilder implements ConfigBuilder +public final class GuardedConfigBuilder extends ConfigBuilder> { public static final List ROLES_DEFAULT = emptyList(); @@ -37,6 +37,13 @@ public final class GuardedConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public GuardedConfigBuilder name( String name) { diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricConfigBuilder.java index 960d644931..34137b9f92 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricConfigBuilder.java @@ -17,7 +17,7 @@ import java.util.function.Function; -public final class MetricConfigBuilder implements ConfigBuilder +public final class MetricConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -30,6 +30,13 @@ public final class MetricConfigBuilder implements 
ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public MetricConfigBuilder group( String group) { diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricRefConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricRefConfigBuilder.java index c114c7400b..712529c9d5 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricRefConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/MetricRefConfigBuilder.java @@ -17,7 +17,7 @@ import java.util.function.Function; -public final class MetricRefConfigBuilder implements ConfigBuilder +public final class MetricRefConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -29,6 +29,13 @@ public final class MetricRefConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public MetricRefConfigBuilder name( String name) { diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceConfigBuilder.java index 29ce72beaa..68a2899d8f 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceConfigBuilder.java @@ -22,7 +22,7 @@ import java.util.Optional; import java.util.function.Function; -public class NamespaceConfigBuilder +public final class NamespaceConfigBuilder extends ConfigBuilder> { public static final List NAMESPACES_DEFAULT = emptyList(); public static final List BINDINGS_DEFAULT = emptyList(); @@ -45,6 +45,13 @@ public class NamespaceConfigBuilder this.mapper = mapper; } + @Override + 
@SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public NamespaceConfigBuilder name( String name) { diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceRefConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceRefConfigBuilder.java index 86525681a5..b49f33e45a 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceRefConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/NamespaceRefConfigBuilder.java @@ -22,7 +22,7 @@ import java.util.Optional; import java.util.function.Function; -public final class NamespaceRefConfigBuilder implements ConfigBuilder +public final class NamespaceRefConfigBuilder extends ConfigBuilder> { public static final Map LINKS_DEFAULT = emptyMap(); @@ -37,6 +37,13 @@ public final class NamespaceRefConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public NamespaceRefConfigBuilder name( String name) { diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/RouteConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/RouteConfigBuilder.java index a7c6654609..0f7056ce50 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/RouteConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/RouteConfigBuilder.java @@ -22,7 +22,7 @@ import java.util.Optional; import java.util.function.Function; -public final class RouteConfigBuilder implements ConfigBuilder +public final class RouteConfigBuilder extends ConfigBuilder> { public static final List WHEN_DEFAULT = emptyList(); public static final List GUARDED_DEFAULT = emptyList(); @@ -41,6 +41,13 @@ public final class RouteConfigBuilder implements 
ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public RouteConfigBuilder order( int order) { @@ -55,7 +62,7 @@ public RouteConfigBuilder exit( return this; } - public >> C when( + public , C>> C when( Function>, C> condition) { return condition.apply(this::when); @@ -72,7 +79,7 @@ public RouteConfigBuilder when( return this; } - public >> B with( + public , B>> B with( Function>, B> with) { return with.apply(this::with); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryConfigBuilder.java index a576b68c0e..9b36778e9c 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryConfigBuilder.java @@ -20,7 +20,7 @@ import java.util.Optional; import java.util.function.Function; -public final class TelemetryConfigBuilder implements ConfigBuilder +public final class TelemetryConfigBuilder extends ConfigBuilder> { public static final List ATTRIBUTES_DEFAULT = List.of(); public static final List METRICS_DEFAULT = List.of(); @@ -38,6 +38,13 @@ public final class TelemetryConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public AttributeConfigBuilder> attribute() { return new AttributeConfigBuilder<>(this::attribute); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryRefConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryRefConfigBuilder.java index c59837ebb4..3b8503d92f 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryRefConfigBuilder.java +++ 
b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/TelemetryRefConfigBuilder.java @@ -19,7 +19,7 @@ import java.util.List; import java.util.function.Function; -public final class TelemetryRefConfigBuilder implements ConfigBuilder +public final class TelemetryRefConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -31,6 +31,13 @@ public final class TelemetryRefConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public MetricRefConfigBuilder> metric() { return new MetricRefConfigBuilder<>(this::metric); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/VaultConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/VaultConfigBuilder.java index 66f363ebf7..a545cf9038 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/VaultConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/VaultConfigBuilder.java @@ -19,7 +19,7 @@ import java.util.function.Function; -public final class VaultConfigBuilder implements ConfigBuilder +public final class VaultConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -33,6 +33,13 @@ public final class VaultConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public VaultConfigBuilder name( String name) { @@ -47,7 +54,7 @@ public VaultConfigBuilder type( return this; } - public >> C options( + public , C>> C options( Function>, C> options) { return options.apply(this::options); diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java index 76136e3b6d..321da79077 100644 
--- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.engine.config; import static io.aklivity.zilla.runtime.engine.config.KindConfig.SERVER; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -54,14 +55,18 @@ public void shouldWriteNamespace() NamespaceConfig config = NamespaceConfig.builder() .name("test") .binding() + .inject(identity()) .name("test0") .type("test") .kind(SERVER) .options(TestBindingOptionsConfig::builder) + .inject(identity()) .mode("test") .build() .route() + .inject(identity()) .when(TestConditionConfig::builder) + .inject(identity()) .match("test") .build() .exit("exit0") diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java index edc10a071e..53200c9213 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java @@ -17,6 +17,7 @@ import static io.aklivity.zilla.runtime.engine.config.KindConfig.REMOTE_SERVER; import static io.aklivity.zilla.runtime.engine.config.KindConfig.SERVER; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyCollectionOf; @@ -87,6 +88,7 @@ public void shouldWriteBinding() BindingConfig[] bindings = { BindingConfig.builder() + .inject(identity()) .name("test") .type("test") .kind(SERVER) @@ -130,6 +132,7 
@@ public void shouldWriteBindingWithVault() BindingConfig[] bindings = { BindingConfig.builder() + .inject(identity()) .vault("test") .name("test") .type("test") @@ -178,6 +181,7 @@ public void shouldWriteBindingWithOptions() .type("test") .kind(SERVER) .options(TestBindingOptionsConfig::builder) + .inject(identity()) .mode("test") .build() .build() @@ -227,6 +231,7 @@ public void shouldWriteBindingWithExit() .type("test") .kind(SERVER) .route() + .inject(identity()) .exit("test") .build() .build() @@ -249,6 +254,7 @@ public void shouldWriteBindingWithRoute() .route() .exit("test") .guarded() + .inject(identity()) .name("test0") .role("read") .build() diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/ConditionConfigAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/ConditionConfigAdapterTest.java index ef925a61ac..5f525cd250 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/ConditionConfigAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/ConditionConfigAdapterTest.java @@ -124,7 +124,7 @@ public static TestConditionConfigBuilder builder( } } - public static final class TestConditionConfigBuilder implements ConfigBuilder + public static final class TestConditionConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -136,6 +136,13 @@ public static final class TestConditionConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public TestConditionConfigBuilder match( String match) { diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceConfigAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceConfigAdapterTest.java index cd96cc9547..cf92c2f525 100644 --- 
a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceConfigAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceConfigAdapterTest.java @@ -17,6 +17,7 @@ import static io.aklivity.zilla.runtime.engine.config.KindConfig.SERVER; import static java.util.Collections.emptyMap; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.emptyCollectionOf; import static org.hamcrest.Matchers.equalTo; @@ -140,6 +141,7 @@ public void shouldReadNamespaceWithBinding() public void shouldWriteNamespaceWithBinding() { NamespaceConfig config = NamespaceConfig.builder() + .inject(identity()) .name("test") .binding() .name("test") @@ -185,6 +187,7 @@ public void shouldReadNamespaceWithGuard() public void shouldWriteNamespaceWithGuard() { NamespaceConfig config = NamespaceConfig.builder() + .inject(identity()) .name("test") .guard() .name("default") @@ -234,6 +237,7 @@ public void shouldReadNamespaceWithVault() public void shouldWriteNamespaceWithVault() { NamespaceConfig config = NamespaceConfig.builder() + .inject(identity()) .name("test") .vault() .name("default") @@ -281,6 +285,7 @@ public void shouldReadNamespaceWithTelemetry() public void shouldWriteNamespaceWithTelemetry() { NamespaceConfig config = NamespaceConfig.builder() + .inject(identity()) .name("test") .telemetry() .attribute() @@ -340,6 +345,7 @@ public void shouldReadNamespaceWithReference() public void shouldWriteNamespaceWithReference() { NamespaceConfig config = NamespaceConfig.builder() + .inject(identity()) .name("test") .namespace() .name("test") diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceRefConfigAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceRefConfigAdapterTest.java index b47e8d6c0c..7d288844bd 100644 --- 
a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceRefConfigAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/NamespaceRefConfigAdapterTest.java @@ -17,6 +17,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -107,6 +108,7 @@ public void shouldReadReferenceWithLink() public void shouldWriteReferenceWithLink() { NamespaceRefConfig reference = NamespaceRefConfig.builder() + .inject(identity()) .name("test") .link("self", "/test") .build(); diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/RouteConfigAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/RouteConfigAdapterTest.java index a858d54819..9ce6533307 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/RouteConfigAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/RouteConfigAdapterTest.java @@ -16,6 +16,7 @@ package io.aklivity.zilla.runtime.engine.internal.config; import static java.util.Collections.singletonList; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; @@ -110,8 +111,10 @@ public void shouldReadRouteGuarded() public void shouldWriteRouteGuarded() { RouteConfig route = RouteConfig.builder() + .inject(identity()) .exit("test") .guarded() + .inject(identity()) .name("test") .role("role") .build() @@ -147,8 +150,10 @@ public void shouldReadRouteWhenMatch() public void shouldWriteRouteWhenMatch() { RouteConfig route = RouteConfig.builder() + .inject(identity()) 
.exit("test") .when(TestConditionConfig::builder) + .inject(identity()) .match("test") .build() .build(); diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryConfigsAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryConfigsAdapterTest.java index 6af2852127..78ea975eef 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryConfigsAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/TelemetryConfigsAdapterTest.java @@ -15,6 +15,7 @@ */ package io.aklivity.zilla.runtime.engine.internal.config; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -101,15 +102,19 @@ public void shouldWriteTelemetry() { // GIVEN TelemetryConfig telemetry = TelemetryConfig.builder() + .inject(identity()) .attribute() + .inject(identity()) .name("test.attribute") .value("example") .build() .metric() + .inject(identity()) .group("test") .name("test.counter") .build() .exporter() + .inject(identity()) .name("test0") .type("test") .build() @@ -176,18 +181,23 @@ public void shouldWriteTelemetryWithExporterOptions() { // GIVEN TelemetryConfig telemetry = TelemetryConfig.builder() + .inject(identity()) .attribute() + .inject(identity()) .name("test.attribute") .value("example") .build() .metric() + .inject(identity()) .group("test") .name("test.counter") .build() .exporter() + .inject(identity()) .name("test0") .type("test") .options(TestExporterOptionsConfig::builder) + .inject(identity()) .mode("test42") .build() .build() diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfigBuilder.java 
b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfigBuilder.java index e85c4d8601..b8ecf148aa 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfigBuilder.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/binding/config/TestBindingOptionsConfigBuilder.java @@ -20,7 +20,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; -public final class TestBindingOptionsConfigBuilder implements ConfigBuilder +public final class TestBindingOptionsConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -32,6 +32,13 @@ public final class TestBindingOptionsConfigBuilder implements ConfigBuilder> thisType() + { + return (Class>) getClass(); + } + public TestBindingOptionsConfigBuilder mode( String mode) { diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigAdapter.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigAdapter.java index 205e21972f..8694215761 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigAdapter.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigAdapter.java @@ -15,6 +15,8 @@ */ package io.aklivity.zilla.runtime.engine.test.internal.exporter.config; +import static java.util.function.Function.identity; + import jakarta.json.Json; import jakarta.json.JsonObject; import jakarta.json.JsonObjectBuilder; @@ -55,7 +57,8 @@ public JsonObject adaptToJson( public OptionsConfig adaptFromJson( JsonObject object) { - TestExporterOptionsConfigBuilder testOptions = TestExporterOptionsConfig.builder(); + 
TestExporterOptionsConfigBuilder testOptions = TestExporterOptionsConfig.builder() + .inject(identity()); if (object != null) { diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigBuilder.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigBuilder.java index e77b486a6a..27abc1cbca 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigBuilder.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/exporter/config/TestExporterOptionsConfigBuilder.java @@ -20,7 +20,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; -public final class TestExporterOptionsConfigBuilder implements ConfigBuilder +public final class TestExporterOptionsConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -32,6 +32,13 @@ public final class TestExporterOptionsConfigBuilder implements ConfigBuilder< this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public TestExporterOptionsConfigBuilder mode( String mode) { diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfigBuilder.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfigBuilder.java index a62617822e..314fb0ff8f 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfigBuilder.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/guard/config/TestGuardOptionsConfigBuilder.java @@ -25,7 +25,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; import 
io.aklivity.zilla.runtime.engine.config.OptionsConfig; -public final class TestGuardOptionsConfigBuilder implements ConfigBuilder +public final class TestGuardOptionsConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -44,6 +44,13 @@ public final class TestGuardOptionsConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public TestGuardOptionsConfigBuilder credentials( String credentials) { diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfigBuilder.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfigBuilder.java index 4007f52d01..35a2545b8e 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfigBuilder.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/internal/vault/config/TestVaultOptionsConfigBuilder.java @@ -20,7 +20,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; -public final class TestVaultOptionsConfigBuilder implements ConfigBuilder +public final class TestVaultOptionsConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -32,6 +32,13 @@ public final class TestVaultOptionsConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public TestVaultOptionsConfigBuilder mode( String mode) { diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfigBuilder.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfigBuilder.java index f53515dab4..c95bf3acce 100644 --- 
a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfigBuilder.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtKeyConfigBuilder.java @@ -18,7 +18,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public class JwtKeyConfigBuilder implements ConfigBuilder +public class JwtKeyConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -38,6 +38,13 @@ public class JwtKeyConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public JwtKeyConfigBuilder kty( String kty) { diff --git a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfigBuilder.java b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfigBuilder.java index b2b2e6cf3d..7ae6d40e22 100644 --- a/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfigBuilder.java +++ b/runtime/guard-jwt/src/main/java/io/aklivity/zilla/runtime/guard/jwt/config/JwtOptionsConfigBuilder.java @@ -22,7 +22,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; -public class JwtOptionsConfigBuilder implements ConfigBuilder +public class JwtOptionsConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -38,6 +38,13 @@ public class JwtOptionsConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public JwtOptionsConfigBuilder issuer( String issuer) { diff --git a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandlerTest.java b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandlerTest.java index 
90e9928cb4..88649c6a63 100644 --- a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandlerTest.java +++ b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardHandlerTest.java @@ -18,6 +18,7 @@ import static io.aklivity.zilla.specs.guard.jwt.keys.JwtKeys.RFC7515_RS256; import static java.time.Duration.ofSeconds; import static java.util.Arrays.asList; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -46,6 +47,7 @@ public void shouldAuthorize() throws Exception { Duration challenge = ofSeconds(3L); JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -78,6 +80,7 @@ public void shouldChallengeDuringChallengeWindow() throws Exception { Duration challenge = ofSeconds(3L); JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -106,6 +109,7 @@ public void shouldNotChallengeDuringWindowWithoutSubject() throws Exception { Duration challenge = ofSeconds(3L); JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -133,6 +137,7 @@ public void shouldNotChallengeBeforeChallengeWindow() throws Exception { Duration challenge = ofSeconds(3L); JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -161,6 +166,7 @@ public void shouldNotChallengeAgainDuringChallengeWindow() throws Exception { Duration challenge = ofSeconds(3L); JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) 
@@ -190,6 +196,7 @@ public void shouldNotChallengeAgainDuringChallengeWindow() throws Exception public void shouldNotAuthorizeWhenAlgorithmDiffers() throws Exception { JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -211,6 +218,7 @@ public void shouldNotAuthorizeWhenAlgorithmDiffers() throws Exception public void shouldNotAuthorizeWhenSignatureInvalid() throws Exception { JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -234,6 +242,7 @@ public void shouldNotAuthorizeWhenSignatureInvalid() throws Exception public void shouldNotAuthorizeWhenIssuerDiffers() throws Exception { JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -255,6 +264,7 @@ public void shouldNotAuthorizeWhenIssuerDiffers() throws Exception public void shouldNotAuthorizeWhenAudienceDiffers() throws Exception { JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -276,6 +286,7 @@ public void shouldNotAuthorizeWhenAudienceDiffers() throws Exception public void shouldNotAuthorizeWhenExpired() throws Exception { JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -300,6 +311,7 @@ public void shouldNotAuthorizeWhenExpired() throws Exception public void shouldNotAuthorizeWhenNotYetValid() throws Exception { JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -325,6 +337,7 @@ public void shouldNotVerifyAuthorizedWhenRolesInsufficient() throws Exception { Duration challenge = ofSeconds(30L); 
JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -350,6 +363,7 @@ public void shouldReauthorizeWhenExpirationLater() throws Exception { Duration challenge = ofSeconds(3L); JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -384,6 +398,7 @@ public void shouldReauthorizeWhenScopeBroader() throws Exception { Duration challenge = ofSeconds(3L); JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -419,6 +434,7 @@ public void shouldNotReauthorizeWhenExpirationEarlier() throws Exception { Duration challenge = ofSeconds(3L); JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -453,6 +469,7 @@ public void shouldNotReauthorizeWhenScopeNarrower() throws Exception { Duration challenge = ofSeconds(3L); JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -489,6 +506,7 @@ public void shouldNotReauthorizeWhenSubjectDiffers() throws Exception { Duration challenge = ofSeconds(3L); JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -525,6 +543,7 @@ public void shouldNotReauthorizeWhenContextDiffers() throws Exception { Duration challenge = ofSeconds(3L); JwtOptionsConfig options = JwtOptionsConfig.builder() + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -560,6 +579,7 @@ public void shouldDeauthorize() throws Exception { Duration challenge = ofSeconds(30L); JwtOptionsConfig options = JwtOptionsConfig.builder() + 
.inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) diff --git a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardTest.java b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardTest.java index be99860433..9850e10543 100644 --- a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardTest.java +++ b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardTest.java @@ -18,6 +18,7 @@ import static io.aklivity.zilla.runtime.guard.jwt.internal.keys.JwtKeyConfigs.RFC7515_RS256_CONFIG; import static io.aklivity.zilla.specs.guard.jwt.keys.JwtKeys.RFC7515_RS256; import static java.time.Duration.ofSeconds; +import static java.util.function.Function.identity; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -48,6 +49,7 @@ public class JwtGuardTest public void shouldNotVerifyMissingContext() throws Exception { GuardedConfig guarded = GuardedConfig.builder() + .inject(identity()) .name("test0") .role("read:stream") .role("write:stream") @@ -70,6 +72,7 @@ public void shouldNotVerifyMissingHandler() throws Exception when(engine.index()).thenReturn(0); GuardedConfig guarded = GuardedConfig.builder() + .inject(identity()) .name("test0") .role("read:stream") .role("write:stream") @@ -94,6 +97,7 @@ public void shouldNotVerifyMissingSession() throws Exception when(engine.index()).thenReturn(0); GuardedConfig guarded = GuardedConfig.builder() + .inject(identity()) .name("test0") .role("read:stream") .role("write:stream") @@ -105,6 +109,7 @@ public void shouldNotVerifyMissingSession() throws Exception GuardContext context = guard.supply(engine); context.attach(GuardConfig.builder() + .inject(identity()) .name("test0") .type("jwt") .options(JwtOptionsConfig.builder().build()) @@ -135,9 +140,11 @@ public void 
shouldNotVerifyRolesWhenInsufficient() throws Exception GuardContext context = guard.supply(engine); GuardHandler handler = context.attach(GuardConfig.builder() + .inject(identity()) .name("test0") .type("jwt") .options(JwtOptionsConfig::builder) + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -172,6 +179,7 @@ public void shouldVerifyRolesWhenExact() throws Exception when(engine.supplyAuthorizedId()).thenReturn(1L); GuardedConfig guarded = GuardedConfig.builder() + .inject(identity()) .name("test0") .role("read:stream") .role("write:stream") @@ -184,9 +192,11 @@ public void shouldVerifyRolesWhenExact() throws Exception GuardContext context = guard.supply(engine); GuardHandler handler = context.attach(GuardConfig.builder() + .inject(identity()) .name("test0") .type("jwt") .options(JwtOptionsConfig::builder) + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -221,6 +231,7 @@ public void shouldVerifyRolesWhenSuperset() throws Exception when(engine.supplyAuthorizedId()).thenReturn(1L); GuardedConfig guarded = GuardedConfig.builder() + .inject(identity()) .name("test0") .role("read:stream") .build(); @@ -232,9 +243,11 @@ public void shouldVerifyRolesWhenSuperset() throws Exception GuardContext context = guard.supply(engine); GuardHandler handler = context.attach(GuardConfig.builder() + .inject(identity()) .name("test0") .type("jwt") .options(JwtOptionsConfig::builder) + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -269,6 +282,7 @@ public void shouldVerifyRolesWhenEmpty() throws Exception when(engine.supplyAuthorizedId()).thenReturn(1L); GuardedConfig guarded = GuardedConfig.builder() + .inject(identity()) .name("test0") .build(); @@ -279,9 +293,11 @@ public void shouldVerifyRolesWhenEmpty() throws Exception GuardContext context = guard.supply(engine); GuardHandler handler = context.attach(GuardConfig.builder() + 
.inject(identity()) .name("test0") .type("jwt") .options(JwtOptionsConfig::builder) + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -321,9 +337,11 @@ public void shouldVerifyWhenIndexDiffers() throws Exception GuardContext context = guard.supply(engine); GuardConfig config = GuardConfig.builder() + .inject(identity()) .name("test0") .type("jwt") .options(JwtOptionsConfig::builder) + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -364,6 +382,7 @@ public void shouldIdentify() throws Exception when(engine.supplyAuthorizedId()).thenReturn(1L); GuardedConfig guarded = GuardedConfig.builder() + .inject(identity()) .name("test0") .build(); @@ -374,9 +393,11 @@ public void shouldIdentify() throws Exception GuardContext context = guard.supply(engine); GuardHandler handler = context.attach(GuardConfig.builder() + .inject(identity()) .name("test0") .type("jwt") .options(JwtOptionsConfig::builder) + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) @@ -417,9 +438,11 @@ public void shouldIdentifyWhenIndexDiffers() throws Exception Duration challenge = ofSeconds(3L); GuardConfig config = GuardConfig.builder() + .inject(identity()) .name("test0") .type("jwt") .options(JwtOptionsConfig::builder) + .inject(identity()) .issuer("test issuer") .audience("testAudience") .key(RFC7515_RS256_CONFIG) diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfigBuilder.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfigBuilder.java index 5a5879960c..124f2ed957 100644 --- a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfigBuilder.java +++ 
b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemOptionsConfigBuilder.java @@ -20,7 +20,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; -public final class FileSystemOptionsConfigBuilder implements ConfigBuilder +public final class FileSystemOptionsConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -34,6 +34,13 @@ public final class FileSystemOptionsConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public FileSystemStoreConfigBuilder> keys() { return new FileSystemStoreConfigBuilder<>(this::keys); diff --git a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfigBuilder.java b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfigBuilder.java index 184480bb3e..8ab4fe671c 100644 --- a/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfigBuilder.java +++ b/runtime/vault-filesystem/src/main/java/io/aklivity/zilla/runtime/vault/filesystem/config/FileSystemStoreConfigBuilder.java @@ -19,7 +19,7 @@ import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; -public final class FileSystemStoreConfigBuilder implements ConfigBuilder +public final class FileSystemStoreConfigBuilder extends ConfigBuilder> { private final Function mapper; @@ -33,6 +33,13 @@ public final class FileSystemStoreConfigBuilder implements ConfigBuilder this.mapper = mapper; } + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + public FileSystemStoreConfigBuilder store( String store) { diff --git 
a/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapterTest.java b/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapterTest.java index f9e0d74826..fda79b60bf 100644 --- a/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapterTest.java +++ b/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/config/FileSystemOptionsConfigAdapterTest.java @@ -15,6 +15,7 @@ */ package io.aklivity.zilla.runtime.vault.filesystem.internal.config; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -93,7 +94,9 @@ public void shouldWriteOptions() public void shouldWriteOptionsWithKeys() { FileSystemOptionsConfig options = FileSystemOptionsConfig.builder() + .inject(identity()) .keys() + .inject(identity()) .store("localhost.p12") .type("pkcs12") .password("generated") From 96bbe4b0ed48db73bb05cfb0dce521bb2cfd7d98 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:00:40 -0700 Subject: [PATCH 053/115] Bump org.apache.maven:maven-core from 3.6.0 to 3.8.1 (#361) * Bump org.apache.maven:maven-core from 3.6.0 to 3.8.1 Bumps [org.apache.maven:maven-core](https://github.com/apache/maven) from 3.6.0 to 3.8.1. - [Release notes](https://github.com/apache/maven/releases) - [Commits](https://github.com/apache/maven/compare/maven-3.6.0...maven-3.8.1) --- updated-dependencies: - dependency-name: org.apache.maven:maven-core dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] * Use maven bom to ensure consistent maven dependency versions --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: John Fallows --- cloud/docker-image/pom.xml | 3 +++ manager/NOTICE | 6 +++--- pom.xml | 27 +++++++-------------------- specs/engine.spec/NOTICE | 1 - 4 files changed, 13 insertions(+), 24 deletions(-) diff --git a/cloud/docker-image/pom.xml b/cloud/docker-image/pom.xml index 0bf9572bff..3b6fdbd3fe 100644 --- a/cloud/docker-image/pom.xml +++ b/cloud/docker-image/pom.xml @@ -274,6 +274,9 @@ io/aklivity/zilla/manager/** io/aklivity/zilla/incubator/** org/agrona/** + org/apache/apache/** + org/apache/maven/maven/** + org/apache/maven/maven-parent/** jakarta/json/** jakarta/inject/** org/leadpony/justify/** diff --git a/manager/NOTICE b/manager/NOTICE index 3ee3390cb0..42f45c5be2 100644 --- a/manager/NOTICE +++ b/manager/NOTICE @@ -22,10 +22,10 @@ This project includes: JSON-B API under Eclipse Public License 2.0 or GNU General Public License, version 2 with the GNU Classpath Exception JSON-P Default Provider under Eclipse Public License 2.0 or GNU General Public License, version 2 with the GNU Classpath Exception org.eclipse.yasson under Eclipse Public License v. 2.0 or Eclipse Distribution License v. 
1.0 - Plexus :: Component Annotations under The Apache Software License, Version 2.0 + Plexus :: Component Annotations under Apache License, Version 2.0 Plexus Cipher: encryption/decryption Component under Apache Public License 2.0 - Plexus Classworlds under The Apache Software License, Version 2.0 - Plexus Common Utilities under The Apache Software License, Version 2.0 + Plexus Classworlds under Apache License, Version 2.0 + Plexus Common Utilities under Apache License, Version 2.0 Plexus Security Dispatcher Component under Apache Public License 2.0 Sisu-Inject-Plexus : Aggregate OSGi bundle under Eclipse Public License, Version 1.0 diff --git a/pom.xml b/pom.xml index 20470b04cc..e259e65379 100644 --- a/pom.xml +++ b/pom.xml @@ -65,6 +65,13 @@ + + org.apache.maven + maven + 3.8.1 + pom + import + jakarta.json jakarta.json-api @@ -145,26 +152,6 @@ mockito-core ${mockito.version} - - org.apache.maven - maven-core - 3.6.0 - - - org.apache.maven - maven-artifact - 3.5.0 - - - org.apache.maven - maven-compat - 3.6.0 - - - org.apache.maven.plugin-tools - maven-plugin-annotations - 3.9.0 - org.apache.maven.plugin-testing maven-plugin-testing-harness diff --git a/specs/engine.spec/NOTICE b/specs/engine.spec/NOTICE index 17c8dd9260..eb35495f05 100644 --- a/specs/engine.spec/NOTICE +++ b/specs/engine.spec/NOTICE @@ -14,7 +14,6 @@ under the License. 
This project includes: agrona under The Apache License, Version 2.0 ANTLR 4 Runtime under BSD-3-Clause - Hamcrest Core under New BSD License ICU4J under Unicode/ICU License Jakarta JSON Processing API under Eclipse Public License 2.0 or GNU General Public License, version 2 with the GNU Classpath Exception Java Unified Expression Language API under The Apache Software License, Version 2.0 From 2e2203597606066f530da3f1c97e5da2603145cc Mon Sep 17 00:00:00 2001 From: John Fallows Date: Tue, 15 Aug 2023 18:01:00 -0700 Subject: [PATCH 054/115] Sanitize zip entry path (#362) --- .../manager/internal/commands/install/ZpmInstall.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/manager/src/main/java/io/aklivity/zilla/manager/internal/commands/install/ZpmInstall.java b/manager/src/main/java/io/aklivity/zilla/manager/internal/commands/install/ZpmInstall.java index eacaadf4c3..85343e48e0 100644 --- a/manager/src/main/java/io/aklivity/zilla/manager/internal/commands/install/ZpmInstall.java +++ b/manager/src/main/java/io/aklivity/zilla/manager/internal/commands/install/ZpmInstall.java @@ -731,8 +731,12 @@ private void expandJar( { for (JarEntry entry : list(sourceJar.entries())) { - Path entryPath = targetDir.resolve(entry.getName()); - if (entry.isDirectory()) + Path entryPath = targetDir.resolve(entry.getName()).normalize(); + if (!entryPath.startsWith(targetDir)) + { + throw new IOException("Bad zip entry"); + } + else if (entry.isDirectory()) { createDirectories(entryPath); } From ae11f476e0a4ad87e379ef3c130ba0b7f1acc970 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Tue, 15 Aug 2023 18:49:54 -0700 Subject: [PATCH 055/115] Upgrade to Maven 3.9.4 --- .mvn/wrapper/maven-wrapper.properties | 4 ++-- pom.xml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties index 59ce113780..2e76e189d2 100644 --- a/.mvn/wrapper/maven-wrapper.properties +++ 
b/.mvn/wrapper/maven-wrapper.properties @@ -1,2 +1,2 @@ -distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.4/apache-maven-3.8.4-bin.zip -wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.4/apache-maven-3.9.4-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar diff --git a/pom.xml b/pom.xml index e259e65379..dae4371fe9 100644 --- a/pom.xml +++ b/pom.xml @@ -68,7 +68,7 @@ org.apache.maven maven - 3.8.1 + 3.9.4 pom import From e6149d22c18dd4d4745cbf8a82b1a601cb9a2f7a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Aug 2023 11:03:38 -0700 Subject: [PATCH 056/115] Bump org.apache.maven.plugins:maven-plugin-plugin from 3.5 to 3.9.0 (#366) Bumps [org.apache.maven.plugins:maven-plugin-plugin](https://github.com/apache/maven-plugin-tools) from 3.5 to 3.9.0. - [Release notes](https://github.com/apache/maven-plugin-tools/releases) - [Commits](https://github.com/apache/maven-plugin-tools/compare/maven-plugin-tools-3.5...maven-plugin-tools-3.9.0) --- updated-dependencies: - dependency-name: org.apache.maven.plugins:maven-plugin-plugin dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index dae4371fe9..802477b5d1 100644 --- a/pom.xml +++ b/pom.xml @@ -343,7 +343,7 @@ org.apache.maven.plugins maven-plugin-plugin - 3.5 + 3.9.0 org.apache.maven.plugins From 1a20c345f1d1045a484d9049422f4ae162ca5df5 Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Thu, 17 Aug 2023 11:25:41 -0700 Subject: [PATCH 057/115] Merge consumer group metadata (#359) --- .../stream/KafkaClientGroupFactory.java | 21 +- .../binding-kafka/src/main/zilla/protocol.idl | 18 + .../tls/internal/streams/ClientIT.java | 1 + .../kafka/internal/KafkaFunctions.java | 307 ++++++++++++++++++ .../main/resources/META-INF/zilla/kafka.idl | 63 ++++ .../kafka/internal/KafkaFunctionsTest.java | 160 +++++++++ 6 files changed, 562 insertions(+), 8 deletions(-) diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index 4660710a84..aba4326b47 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -2507,26 +2507,31 @@ private void doEncodeSyncGroupRequest( final String memberId = delegate.groupMembership.memberIds.get(delegate.groupId); + final boolean isLeader = leader.equals(memberId); + final SyncGroupRequestFW syncGroupRequest = syncGroupRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) .groupId(delegate.groupId) .generatedId(generationId) .memberId(memberId) .groupInstanceId(delegate.groupMembership.instanceId) - .assignmentCount(members.size()) + .assignmentCount(isLeader ? 
members.size() : 0) .build(); encodeProgress = syncGroupRequest.limit(); - for (int i = 0; i < members.size(); i++) + if (isLeader) { - final AssignmentFW groupAssignment = - assignmentRW.wrap(encodeBuffer, encodeProgress, encodeLimit) - .memberId(members.get(i)) - .value(assignment) - .build(); + for (int i = 0; i < members.size(); i++) + { + final AssignmentFW groupAssignment = + assignmentRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .memberId(members.get(i)) + .value(assignment) + .build(); - encodeProgress = groupAssignment.limit(); + encodeProgress = groupAssignment.limit(); + } } final int requestId = nextRequestId++; diff --git a/runtime/binding-kafka/src/main/zilla/protocol.idl b/runtime/binding-kafka/src/main/zilla/protocol.idl index 02cbe07d4b..c789a6b03a 100644 --- a/runtime/binding-kafka/src/main/zilla/protocol.idl +++ b/runtime/binding-kafka/src/main/zilla/protocol.idl @@ -476,6 +476,24 @@ scope protocol string16 memberId; string16 groupInstanceId = null; } + + struct TopicPartition + { + int32 partitionId; + } + + struct ConsumerAssignment + { + string16 consumerId; + TopicPartition[] partitions; + } + + struct MemberAssignment + { + string16 topic; + TopicPartition[] partitions; + octets userdata; + } } scope sasl diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ClientIT.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ClientIT.java index 6f27c43510..8526d07531 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ClientIT.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ClientIT.java @@ -264,6 +264,7 @@ public void shouldReceiveClientSentWriteCloseBeforeHandshake() throws Exception k3po.finish(); } + @Ignore("GitHub Actions") @Test @Configuration("client.yaml") @Specification({ diff --git 
a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java index 8fc2295ab0..72eadf4add 100644 --- a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java +++ b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java @@ -62,6 +62,8 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaApi; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaBootstrapBeginExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaConsumerBeginExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaConsumerDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaDescribeBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaDescribeDataExFW; @@ -71,11 +73,16 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupDataExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMetaBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMetaDataExFW; 
+import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaOffsetCommitBeginExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaOffsetCommitDataExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaOffsetFetchBeginExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaOffsetFetchDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaProduceBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaProduceDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaProduceFlushExFW; @@ -617,6 +624,27 @@ public KafkaGroupBeginExBuilder group() return new KafkaGroupBeginExBuilder(); } + public KafkaConsumerBeginExBuilder consumer() + { + beginExRW.kind(KafkaApi.CONSUMER.value()); + + return new KafkaConsumerBeginExBuilder(); + } + + public KafkaOffsetFetchBeginExBuilder offsetFetch() + { + beginExRW.kind(KafkaApi.OFFSET_FETCH.value()); + + return new KafkaOffsetFetchBeginExBuilder(); + } + + public KafkaOffsetCommitBeginExBuilder offsetCommit() + { + beginExRW.kind(KafkaApi.OFFSET_COMMIT.value()); + + return new KafkaOffsetCommitBeginExBuilder(); + } + public byte[] build() { final KafkaBeginExFW beginEx = beginExRO; @@ -672,6 +700,20 @@ public KafkaMergedBeginExBuilder topic( return this; } + public KafkaMergedBeginExBuilder groupId( + String groupId) + { + mergedBeginExRW.groupId(groupId); + return this; + } + + public KafkaMergedBeginExBuilder consumerId( + String consumerId) + { + mergedBeginExRW.consumerId(consumerId); + return this; + } + public KafkaMergedBeginExBuilder partition( int partitionId, long offset) @@ -1013,6 +1055,112 @@ public KafkaBeginExBuilder build() return KafkaBeginExBuilder.this; } } + + public final class KafkaConsumerBeginExBuilder + { + private final KafkaConsumerBeginExFW.Builder consumerBeginExRW = new KafkaConsumerBeginExFW.Builder(); + + + private 
KafkaConsumerBeginExBuilder() + { + consumerBeginExRW.wrap(writeBuffer, KafkaBeginExFW.FIELD_OFFSET_CONSUMER, writeBuffer.capacity()); + } + + public KafkaConsumerBeginExBuilder groupId( + String groupId) + { + consumerBeginExRW.groupId(groupId); + return this; + } + + public KafkaConsumerBeginExBuilder topic( + String topic) + { + consumerBeginExRW.topic(topic); + return this; + } + + public KafkaConsumerBeginExBuilder partition( + int partitionId) + { + consumerBeginExRW.partitionIds(p -> p.item(i -> i.partitionId(partitionId))); + return this; + } + + public KafkaBeginExBuilder build() + { + final KafkaConsumerBeginExFW consumerBeginEx = consumerBeginExRW.build(); + beginExRO.wrap(writeBuffer, 0, consumerBeginEx.limit()); + return KafkaBeginExBuilder.this; + } + } + + public final class KafkaOffsetFetchBeginExBuilder + { + private final KafkaOffsetFetchBeginExFW.Builder offsetFetchBeginExRW = new KafkaOffsetFetchBeginExFW.Builder(); + + + private KafkaOffsetFetchBeginExBuilder() + { + offsetFetchBeginExRW.wrap(writeBuffer, KafkaBeginExFW.FIELD_OFFSET_OFFSET_FETCH, writeBuffer.capacity()); + } + + public KafkaOffsetFetchBeginExBuilder groupId( + String groupId) + { + offsetFetchBeginExRW.groupId(groupId); + return this; + } + + public KafkaOffsetFetchBeginExBuilder topic( + String topic, + int partitionId) + { + offsetFetchBeginExRW.topics(t -> t.item(i -> + i.topic(topic) + .partitions(p -> p.item(a -> a.partitionId(partitionId))))); + return this; + } + + public KafkaBeginExBuilder build() + { + final KafkaOffsetFetchBeginExFW consumerBeginEx = offsetFetchBeginExRW.build(); + beginExRO.wrap(writeBuffer, 0, consumerBeginEx.limit()); + return KafkaBeginExBuilder.this; + } + } + + public final class KafkaOffsetCommitBeginExBuilder + { + private final KafkaOffsetCommitBeginExFW.Builder offsetCommitBeginExRW = new KafkaOffsetCommitBeginExFW.Builder(); + + + private KafkaOffsetCommitBeginExBuilder() + { + offsetCommitBeginExRW.wrap(writeBuffer, 
KafkaBeginExFW.FIELD_OFFSET_OFFSET_COMMIT, writeBuffer.capacity()); + } + + public KafkaOffsetCommitBeginExBuilder groupId( + String groupId) + { + offsetCommitBeginExRW.groupId(groupId); + return this; + } + + public KafkaOffsetCommitBeginExBuilder topic( + String topic) + { + offsetCommitBeginExRW.topic(topic); + return this; + } + + public KafkaBeginExBuilder build() + { + final KafkaOffsetCommitBeginExFW offsetCommitBeginEx = offsetCommitBeginExRW.build(); + beginExRO.wrap(writeBuffer, 0, offsetCommitBeginEx.limit()); + return KafkaBeginExBuilder.this; + } + } } public static final class KafkaDataExBuilder @@ -1077,6 +1225,27 @@ public KafkaGroupDataExBuilder group() return new KafkaGroupDataExBuilder(); } + public KafkaConsumerDataExBuilder consumer() + { + dataExRW.kind(KafkaApi.CONSUMER.value()); + + return new KafkaConsumerDataExBuilder(); + } + + public KafkaOffsetFetchDataExBuilder offsetFetch() + { + dataExRW.kind(KafkaApi.OFFSET_FETCH.value()); + + return new KafkaOffsetFetchDataExBuilder(); + } + + public KafkaOffsetCommitDataExBuilder offsetCommit() + { + dataExRW.kind(KafkaApi.OFFSET_COMMIT.value()); + + return new KafkaOffsetCommitDataExBuilder(); + } + public byte[] build() { final KafkaDataExFW dataEx = dataExRO; @@ -1629,6 +1798,90 @@ public KafkaDataExBuilder build() return KafkaDataExBuilder.this; } } + + public final class KafkaConsumerDataExBuilder + { + private final KafkaConsumerDataExFW.Builder consumerDataExRW = new KafkaConsumerDataExFW.Builder(); + + private KafkaConsumerDataExBuilder() + { + consumerDataExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_GROUP, writeBuffer.capacity()); + } + + public KafkaConsumerDataExBuilder partition( + int partitionId) + { + consumerDataExRW.partitions(p -> p.item(i -> i.partitionId(partitionId))); + return this; + } + + public KafkaDataExBuilder build() + { + final KafkaConsumerDataExFW consumerDataEx = consumerDataExRW.build(); + dataExRO.wrap(writeBuffer, 0, consumerDataEx.limit()); + return 
KafkaDataExBuilder.this; + } + } + + public final class KafkaOffsetFetchDataExBuilder + { + private final KafkaOffsetFetchDataExFW.Builder offsetFetchDataExRW = new KafkaOffsetFetchDataExFW.Builder(); + + private KafkaOffsetFetchDataExBuilder() + { + offsetFetchDataExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_OFFSET_FETCH, writeBuffer.capacity()); + } + + public KafkaOffsetFetchDataExBuilder topic( + String topic, + int partitionId, + long stableOffset, + long latestOffset) + { + offsetFetchDataExRW.topic(t -> + t.topic(topic).offsets(o -> o.item(i -> + i.partitionId(partitionId).stableOffset(stableOffset).latestOffset(latestOffset)))); + return this; + } + + public KafkaDataExBuilder build() + { + final KafkaOffsetFetchDataExFW offsetFetchDataEx = offsetFetchDataExRW.build(); + dataExRO.wrap(writeBuffer, 0, offsetFetchDataEx.limit()); + return KafkaDataExBuilder.this; + } + } + + public final class KafkaOffsetCommitDataExBuilder + { + private final KafkaOffsetCommitDataExFW.Builder offsetCommitDataExRW = new KafkaOffsetCommitDataExFW.Builder(); + + private KafkaOffsetCommitDataExBuilder() + { + offsetCommitDataExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_OFFSET_COMMIT, writeBuffer.capacity()); + } + + public KafkaOffsetCommitDataExBuilder partitionId( + int partitionId) + { + offsetCommitDataExRW.partitionId(partitionId); + return this; + } + + public KafkaOffsetCommitDataExBuilder partitionOffset( + long partitionId) + { + offsetCommitDataExRW.partitionOffset(partitionId); + return this; + } + + public KafkaDataExBuilder build() + { + final KafkaOffsetCommitDataExFW consumerDataEx = offsetCommitDataExRW.build(); + dataExRO.wrap(writeBuffer, 0, consumerDataEx.limit()); + return KafkaDataExBuilder.this; + } + } } public static final class KafkaFlushExBuilder @@ -1673,6 +1926,13 @@ public KafkaProduceFlushExBuilder produce() return new KafkaProduceFlushExBuilder(); } + public KafkaGroupFlushExBuilder group() + { + flushExRW.kind(KafkaApi.GROUP.value()); + 
+ return new KafkaGroupFlushExBuilder(); + } + public byte[] build() { final KafkaFlushExFW flushEx = flushExRO; @@ -1914,6 +2174,53 @@ public KafkaFlushExBuilder build() return KafkaFlushExBuilder.this; } } + + public final class KafkaGroupFlushExBuilder + { + private final KafkaGroupFlushExFW.Builder groupFlushExRW = new KafkaGroupFlushExFW.Builder(); + + private KafkaGroupFlushExBuilder() + { + groupFlushExRW.wrap(writeBuffer, KafkaFlushExFW.FIELD_OFFSET_FETCH, writeBuffer.capacity()); + } + + public KafkaGroupFlushExBuilder partition( + int partitionId, + long partitionOffset) + { + partition(partitionId, partitionOffset, DEFAULT_LATEST_OFFSET); + return this; + } + + public KafkaGroupFlushExBuilder partition( + int partitionId, + long partitionOffset, + long latestOffset) + { + partition(partitionId, partitionOffset, latestOffset, latestOffset); + return this; + } + + public KafkaGroupFlushExBuilder partition( + int partitionId, + long offset, + long stableOffset, + long latestOffset) + { + groupFlushExRW.partition(p -> p.partitionId(partitionId) + .partitionOffset(offset) + .stableOffset(stableOffset) + .latestOffset(latestOffset)); + return this; + } + + public KafkaFlushExBuilder build() + { + final KafkaGroupFlushExFW groupFlushEx = groupFlushExRW.build(); + flushExRO.wrap(writeBuffer, 0, groupFlushEx.limit()); + return KafkaFlushExBuilder.this; + } + } } public static final class KafkaResetExBuilder diff --git a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl index b0f3f76845..29b072619f 100644 --- a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl +++ b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl @@ -166,10 +166,13 @@ scope kafka { enum KafkaApi (uint8) { + CONSUMER (252), GROUP (253), BOOTSTRAP (254), MERGED (255), META (3), + OFFSET_COMMIT (8), + OFFSET_FETCH (9), DESCRIBE (32), FETCH (1), PRODUCE (0) @@ -177,10 
+180,13 @@ scope kafka union KafkaBeginEx switch (uint8) extends core::stream::Extension { + case 252: kafka::stream::KafkaConsumerBeginEx consumer; case 253: kafka::stream::KafkaGroupBeginEx group; case 254: kafka::stream::KafkaBootstrapBeginEx bootstrap; case 255: kafka::stream::KafkaMergedBeginEx merged; case 3: kafka::stream::KafkaMetaBeginEx meta; + case 8: kafka::stream::KafkaOffsetCommitBeginEx offsetCommit; + case 9: kafka::stream::KafkaOffsetFetchBeginEx offsetFetch; case 32: kafka::stream::KafkaDescribeBeginEx describe; case 1: kafka::stream::KafkaFetchBeginEx fetch; case 0: kafka::stream::KafkaProduceBeginEx produce; @@ -188,9 +194,12 @@ scope kafka union KafkaDataEx switch (uint8) extends core::stream::Extension { + case 252: kafka::stream::KafkaConsumerDataEx consumer; case 253: kafka::stream::KafkaGroupDataEx group; case 255: kafka::stream::KafkaMergedDataEx merged; case 3: kafka::stream::KafkaMetaDataEx meta; + case 8: kafka::stream::KafkaOffsetCommitDataEx offsetCommit; + case 9: kafka::stream::KafkaOffsetFetchDataEx offsetFetch; case 32: kafka::stream::KafkaDescribeDataEx describe; case 1: kafka::stream::KafkaFetchDataEx fetch; case 0: kafka::stream::KafkaProduceDataEx produce; @@ -207,6 +216,7 @@ scope kafka struct KafkaResetEx extends core::stream::Extension { int32 error = 0; + string16 consumerId = null; } struct KafkaBootstrapBeginEx @@ -219,6 +229,7 @@ scope kafka KafkaCapabilities capabilities = PRODUCE_AND_FETCH; string16 topic; string16 groupId = null; + string16 consumerId = null; KafkaOffset[] partitions; KafkaFilter[] filters; // ORed KafkaEvaluation evaluation = LAZY; @@ -345,5 +356,57 @@ scope kafka string16 memberId; int32 members; } + + struct TopicPartition + { + int32 partitionId; + } + + struct KafkaConsumerBeginEx + { + string16 groupId; + string16 topic; + TopicPartition[] partitionIds; + } + + struct KafkaConsumerDataEx + { + TopicPartition[] partitions; + } + + struct KafkaOffsetFetchTopic + { + string16 topic; + 
TopicPartition[] partitions; + } + + struct KafkaOffsetFetchBeginEx + { + string16 groupId; + KafkaOffsetFetchTopic[] topics; + } + + struct KafkaOffsetFetchTopicOffsets + { + string16 topic; + KafkaOffset[] offsets; + } + + struct KafkaOffsetFetchDataEx + { + KafkaOffsetFetchTopicOffsets topic; + } + + struct KafkaOffsetCommitBeginEx + { + string16 groupId; + string16 topic; + } + + struct KafkaOffsetCommitDataEx + { + int32 partitionId; + int64 partitionOffset; + } } } diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java index 010262a753..6be4ef9041 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java @@ -59,6 +59,8 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaApi; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaBootstrapBeginExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaConsumerBeginExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaConsumerDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaDescribeBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaDescribeDataExFW; @@ -68,11 +70,17 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupDataExFW; +import 
io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMetaBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMetaDataExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaOffsetCommitBeginExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaOffsetCommitDataExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaOffsetFetchBeginExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaOffsetFetchDataExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaOffsetFetchTopicFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaProduceBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaProduceDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaProduceFlushExFW; @@ -212,6 +220,8 @@ public void shouldGenerateMergedBeginExtension() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("topic") + .groupId("groupId") + .consumerId("consumerId") .partition(0, 1L) .filter() .key("match") @@ -232,6 +242,8 @@ public void shouldGenerateMergedBeginExtension() final KafkaMergedBeginExFW mergedBeginEx = beginEx.merged(); assertEquals("topic", mergedBeginEx.topic().asString()); + assertEquals("groupId", mergedBeginEx.groupId().asString()); + assertEquals("consumerId", mergedBeginEx.consumerId().asString()); assertNotNull(mergedBeginEx.partitions() .matchFirst(p -> p.partitionId() == 0 && p.partitionOffset() == 1L)); @@ -2101,6 +2113,26 @@ public void 
shouldGenerateFetchFlushExtensionWithLatestOffset() assertEquals(1L, partition.latestOffset()); } + @Test + public void shouldGenerateGroupFlushExtension() + { + byte[] build = KafkaFunctions.flushEx() + .typeId(0x01) + .group() + .partition(0, 1L) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaFlushExFW flushEx = new KafkaFlushExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x01, flushEx.typeId()); + + final KafkaGroupFlushExFW groupFlushEx = flushEx.group(); + final KafkaOffsetFW partition = groupFlushEx.partition(); + assertEquals(0, partition.partitionId()); + assertEquals(1L, partition.partitionOffset()); + } + @Test public void shouldMatchFetchDataExtension() throws Exception { @@ -3807,6 +3839,72 @@ public void shouldGenerateGroupBeginExtension() assertEquals(10, groupBeginEx.timeout()); } + @Test + public void shouldGenerateConsumerBeginExtension() + { + byte[] build = KafkaFunctions.beginEx() + .typeId(0x01) + .consumer() + .groupId("test") + .topic("topic") + .partition(1) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaBeginExFW beginEx = new KafkaBeginExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x01, beginEx.typeId()); + assertEquals(KafkaApi.CONSUMER.value(), beginEx.kind()); + + final KafkaConsumerBeginExFW consumerBeginEx = beginEx.consumer(); + assertEquals("test", consumerBeginEx.groupId().asString()); + assertEquals("topic", consumerBeginEx.topic().asString()); + assertEquals(1, consumerBeginEx.partitionIds().fieldCount()); + } + + @Test + public void shouldGenerateOffsetFetchBeginExtension() + { + byte[] build = KafkaFunctions.beginEx() + .typeId(0x01) + .offsetFetch() + .groupId("test") + .topic("topic", 0) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaBeginExFW beginEx = new KafkaBeginExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x01, beginEx.typeId()); + assertEquals(KafkaApi.OFFSET_FETCH.value(), 
beginEx.kind()); + + final KafkaOffsetFetchBeginExFW offsetFetchBeginEx = beginEx.offsetFetch(); + KafkaOffsetFetchTopicFW topic = offsetFetchBeginEx.topics() + .matchFirst(t -> t.topic().asString().equals("topic")); + assertEquals(1, topic.partitions().fieldCount()); + } + + @Test + public void shouldGenerateOffsetCommitBeginExtension() + { + byte[] build = KafkaFunctions.beginEx() + .typeId(0x01) + .offsetCommit() + .groupId("test") + .topic("topic") + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaBeginExFW beginEx = new KafkaBeginExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x01, beginEx.typeId()); + assertEquals(KafkaApi.OFFSET_COMMIT.value(), beginEx.kind()); + + final KafkaOffsetCommitBeginExFW offsetCommitBeginEx = beginEx.offsetCommit(); + assertEquals("test", offsetCommitBeginEx.groupId().asString()); + assertEquals("topic", offsetCommitBeginEx.topic().asString()); + } + @Test public void shouldMatchGroupBeginExtension() throws Exception { @@ -3855,6 +3953,68 @@ public void shouldGenerateGroupDataExtension() assertTrue(groupDataEx.members() == 2); } + @Test + public void shouldGenerateConsumerDataExtension() + { + byte[] build = KafkaFunctions.dataEx() + .typeId(0x01) + .consumer() + .partition(0) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaDataExFW dataEx = new KafkaDataExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x01, dataEx.typeId()); + assertEquals(KafkaApi.CONSUMER.value(), dataEx.kind()); + + final KafkaConsumerDataExFW consumerDataEx = dataEx.consumer(); + assertTrue(consumerDataEx.partitions().fieldCount() == 1); + } + + @Test + public void shouldGenerateOffsetFetchDataExtension() + { + byte[] build = KafkaFunctions.dataEx() + .typeId(0x01) + .offsetFetch() + .topic("test", 0, 1L, 2L) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaDataExFW dataEx = new KafkaDataExFW().wrap(buffer, 0, buffer.capacity()); + 
assertEquals(0x01, dataEx.typeId()); + assertEquals(KafkaApi.OFFSET_FETCH.value(), dataEx.kind()); + + final KafkaOffsetFetchDataExFW offsetFetchDataEx = dataEx.offsetFetch(); + KafkaOffsetFW offset = offsetFetchDataEx.topic().offsets().matchFirst(o -> o.partitionId() == 0); + assertEquals("test", offsetFetchDataEx.topic().topic().asString()); + assertEquals(1L, offset.stableOffset()); + assertEquals(2L, offset.latestOffset()); + } + + @Test + public void shouldGenerateOffsetCommitDataExtension() + { + byte[] build = KafkaFunctions.dataEx() + .typeId(0x01) + .offsetCommit() + .partitionId(0) + .partitionOffset(1L) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaDataExFW dataEx = new KafkaDataExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x01, dataEx.typeId()); + assertEquals(KafkaApi.OFFSET_COMMIT.value(), dataEx.kind()); + + final KafkaOffsetCommitDataExFW offsetCommitDataEx = dataEx.offsetCommit(); + assertEquals(0, offsetCommitDataEx.partitionId()); + assertEquals(1L, offsetCommitDataEx.partitionOffset()); + } + @Test public void shouldMatchGroupDataExtension() throws Exception { From 2fe3ee3ca33b0af817104af57079acdfa3320de3 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 17 Aug 2023 12:27:30 -0700 Subject: [PATCH 058/115] Support config builder for MQTT config (#372) --- .../mqtt/config/MqttConditionConfig.java | 15 ++++- .../config/MqttConditionConfigBuilder.java | 63 +++++++++++++++++++ .../config/MqttConditionConfigAdapter.java | 19 +++--- .../MqttConditionConfigAdapterTest.java | 9 ++- 4 files changed, 96 insertions(+), 10 deletions(-) create mode 100644 incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java index 
82d6dedb5f..f8b8b45575 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java @@ -15,6 +15,8 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.config; +import java.util.function.Function; + import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; @@ -23,7 +25,18 @@ public final class MqttConditionConfig extends ConditionConfig public final String topic; public final MqttCapabilities capabilities; - public MqttConditionConfig( + public static MqttConditionConfigBuilder builder() + { + return new MqttConditionConfigBuilder<>(MqttConditionConfig.class::cast); + } + + public static MqttConditionConfigBuilder builder( + Function mapper) + { + return new MqttConditionConfigBuilder<>(mapper); + } + + MqttConditionConfig( String topic, MqttCapabilities capabilities) { diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java new file mode 100644 index 0000000000..a9db039b3d --- /dev/null +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java @@ -0,0 +1,63 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; +import io.aklivity.zilla.runtime.engine.config.ConditionConfig; +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public final class MqttConditionConfigBuilder extends ConfigBuilder> +{ + private final Function mapper; + + private String topic; + private MqttCapabilities capabilities; + + MqttConditionConfigBuilder( + Function mapper) + { + this.mapper = mapper; + } + + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + + public MqttConditionConfigBuilder topic( + String topic) + { + this.topic = topic; + return this; + } + + public MqttConditionConfigBuilder capabilities( + MqttCapabilities capabilities) + { + this.capabilities = capabilities; + return this; + } + + @Override + public T build() + { + return mapper.apply(new MqttConditionConfig(topic, capabilities)); + } +} diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java index caa1b367e9..c9464a035a 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java +++ 
b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java @@ -21,6 +21,7 @@ import jakarta.json.bind.adapter.JsonbAdapter; import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfig; +import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfigBuilder; import io.aklivity.zilla.runtime.binding.mqtt.internal.MqttBinding; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; @@ -62,14 +63,18 @@ public JsonObject adaptToJson( public ConditionConfig adaptFromJson( JsonObject object) { - String topic = object.containsKey(TOPIC_NAME) - ? object.getString(TOPIC_NAME) - : null; + MqttConditionConfigBuilder mqttConfig = MqttConditionConfig.builder(); - MqttCapabilities capabilities = object.containsKey(CAPABILITIES_NAME) - ? MqttCapabilities.valueOf(object.getString(CAPABILITIES_NAME).toUpperCase()) - : null; + if (object.containsKey(TOPIC_NAME)) + { + mqttConfig.topic(object.getString(TOPIC_NAME)); + } + + if (object.containsKey(CAPABILITIES_NAME)) + { + mqttConfig.capabilities(MqttCapabilities.valueOf(object.getString(CAPABILITIES_NAME).toUpperCase())); + } - return new MqttConditionConfig(topic, capabilities); + return mqttConfig.build(); } } diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java index aa51fec231..f56647f677 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java @@ -17,6 +17,7 @@ import static 
io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities.PUBLISH_ONLY; import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities.SUBSCRIBE_ONLY; +import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -50,7 +51,7 @@ public void shouldReadCondition() "{" + "\"topic\": \"test\"," + "\"capabilities\": \"publish_only\"" + - "}"; + "}"; MqttConditionConfig condition = jsonb.fromJson(text, MqttConditionConfig.class); @@ -62,7 +63,11 @@ public void shouldReadCondition() @Test public void shouldWriteCondition() { - MqttConditionConfig condition = new MqttConditionConfig("test", SUBSCRIBE_ONLY); + MqttConditionConfig condition = MqttConditionConfig.builder() + .inject(identity()) + .topic("test") + .capabilities(SUBSCRIBE_ONLY) + .build(); String text = jsonb.toJson(condition); From 79b1d3360b3940c0fb6a956d10d26074d334316b Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 17 Aug 2023 16:02:34 -0700 Subject: [PATCH 059/115] Support binding config builder exit (#373) --- .../engine/config/BindingConfigBuilder.java | 21 ++++++++++++++++++- .../config/BindingConfigsAdapter.java | 8 +++---- .../config/BindingConfigsAdapterTest.java | 6 ++---- 3 files changed, 25 insertions(+), 10 deletions(-) diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java index 43a56118ef..50aa0863c4 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/BindingConfigBuilder.java @@ -33,6 +33,7 @@ public final class BindingConfigBuilder extends ConfigBuilder routes; private TelemetryRefConfig telemetry; @@ -85,6 +86,13 @@ public BindingConfigBuilder 
entry( return this; } + public BindingConfigBuilder exit( + String exit) + { + this.exit = exit; + return this; + } + public , C>> C options( Function>, C> options) { @@ -100,7 +108,8 @@ public BindingConfigBuilder options( public RouteConfigBuilder> route() { - return new RouteConfigBuilder<>(this::route); + return new RouteConfigBuilder<>(this::route) + .order(routes != null ? routes.size() : 0); } public BindingConfigBuilder route( @@ -110,6 +119,9 @@ public BindingConfigBuilder route( { routes = new LinkedList<>(); } + + assert route.order == routes.size(); + routes.add(route); return this; } @@ -129,6 +141,13 @@ public BindingConfigBuilder telemetry( @Override public T build() { + if (exit != null) + { + route() + .exit(exit) + .build(); + } + return mapper.apply(new BindingConfig( vault, name, diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapter.java index fe056d0855..2ea5e22020 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapter.java @@ -151,9 +151,10 @@ public BindingConfig[] adaptFromJson( binding.options(options.adaptFromJson(item.getJsonObject(OPTIONS_NAME))); } - MutableInteger order = new MutableInteger(); if (item.containsKey(ROUTES_NAME)) { + MutableInteger order = new MutableInteger(); + item.getJsonArray(ROUTES_NAME) .stream() .map(JsonValue::asJsonObject) @@ -164,10 +165,7 @@ public BindingConfig[] adaptFromJson( if (item.containsKey(EXIT_NAME)) { - binding.route() - .order(order.value++) - .exit(item.getString(EXIT_NAME)) - .build(); + binding.exit(item.getString(EXIT_NAME)); } if (item.containsKey(TELEMETRY_NAME)) diff --git 
a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java index 53200c9213..3811b35d3f 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/config/BindingConfigsAdapterTest.java @@ -227,13 +227,11 @@ public void shouldWriteBindingWithExit() BindingConfig[] bindings = { BindingConfig.builder() + .inject(identity()) .name("test") .type("test") .kind(SERVER) - .route() - .inject(identity()) - .exit("test") - .build() + .exit("test") .build() }; From 08684433c465b664e91aa577e2ad6e3fa3693ae2 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 17 Aug 2023 21:40:24 -0700 Subject: [PATCH 060/115] Review budget debitors (#374) * Specify correct http server reply debitor watcherId * Tidy debitor and debitor index field names --- .../internal/stream/HttpClientFactory.java | 9 ---- .../internal/stream/HttpServerFactory.java | 20 ++++----- .../stream/KafkaCacheClientFetchFactory.java | 44 +++++++++---------- .../stream/KafkaClientFetchFactory.java | 36 +++++++-------- .../sse/internal/stream/SseServerFactory.java | 32 +++++++------- .../budget/DefaultBudgetCreditor.java | 6 +++ 6 files changed, 72 insertions(+), 75 deletions(-) diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpClientFactory.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpClientFactory.java index 51c51ca383..0088d56060 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpClientFactory.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpClientFactory.java @@ -5061,15 +5061,6 @@ private final 
class Http2HeadersEncoder { private HpackContext context; - void encodePromise( - HpackContext encodeContext, - Array32FW headers, - HpackHeaderBlockFW.Builder headerBlock) - { - reset(encodeContext); - headers.forEach(h -> headerBlock.header(b -> encodeHeader(h.name(), h.value(), b))); - } - void encodeHeaders( HpackContext encodeContext, Array32FW headers, diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java index 78f0974c7c..2357bfffd5 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java @@ -5511,8 +5511,8 @@ private final class Http2Exchange private long requestBud; private int requestCaps; - private BudgetDebitor requestDebitor; - private long requestDebitorIndex = NO_DEBITOR_INDEX; + private BudgetDebitor requestDeb; + private long requestDebIndex = NO_DEBITOR_INDEX; private int localBudget; private int remoteBudget; @@ -5582,10 +5582,10 @@ private void doRequestData( int length = Math.max(Math.min(initialWindow() - requestPad, remaining.value), 0); int reserved = length + requestPad; - if (requestDebitorIndex != NO_DEBITOR_INDEX && requestDebitor != null) + if (requestDebIndex != NO_DEBITOR_INDEX && requestDeb != null) { final int minimum = reserved; // TODO: fragmentation - reserved = requestDebitor.claim(0L, requestDebitorIndex, requestId, minimum, reserved, 0); + reserved = requestDeb.claim(0L, requestDebIndex, requestId, minimum, reserved, 0); length = Math.max(reserved - requestPad, 0); } @@ -5725,10 +5725,10 @@ private void onRequestWindow( requestBud = budgetId; requestCaps = capabilities; - if (requestBud != 0L && requestDebitorIndex == NO_DEBITOR_INDEX) + if (requestBud != 0L 
&& requestDebIndex == NO_DEBITOR_INDEX) { - requestDebitor = supplyDebitor.apply(budgetId); - requestDebitorIndex = requestDebitor.acquire(budgetId, initialId, Http2Server.this::decodeNetworkIfNecessary); + requestDeb = supplyDebitor.apply(budgetId); + requestDebIndex = requestDeb.acquire(budgetId, requestId, Http2Server.this::decodeNetworkIfNecessary); } decodeNetworkIfNecessary(traceId); @@ -5791,10 +5791,10 @@ private void deauthorizeIfNecessary() private void cleanupRequestDebitorIfNecessary() { - if (requestDebitorIndex != NO_DEBITOR_INDEX) + if (requestDebIndex != NO_DEBITOR_INDEX) { - requestDebitor.release(requestDebitorIndex, initialId); - requestDebitorIndex = NO_DEBITOR_INDEX; + requestDeb.release(requestDebIndex, requestId); + requestDebIndex = NO_DEBITOR_INDEX; } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFetchFactory.java index c9bb19d703..0bc5c7b6b9 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFetchFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFetchFactory.java @@ -899,8 +899,8 @@ private final class KafkaCacheClientFetchStream private int state; private int flushFramesSent; - private long replyDebitorIndex = NO_DEBITOR_INDEX; - private BudgetDebitor replyDebitor; + private long replyDebIndex = NO_DEBITOR_INDEX; + private BudgetDebitor replyDeb; private long initialSeq; private long initialAck; @@ -912,7 +912,7 @@ private final class KafkaCacheClientFetchStream private int replyMax; private int replyMin; - private long replyBudgetId; + private long replyBud; private long initialOffset; private int messageOffset; @@ -1159,7 +1159,7 @@ private void doClientReplyDataIfNecessary( { assert 
!KafkaState.closing(state) : String.format("!replyClosing(%08x) [%016x] [%016x] [%016x] %s", - state, replyBudgetId, replyId, replyDebitorIndex, replyDebitor); + state, replyBud, replyId, replyDebIndex, replyDeb); final long initialIsolatedOffset = initialGroupIsolatedOffset.getAsLong(); @@ -1275,11 +1275,11 @@ private void doClientReplyData( { int reserved = reservedMax; boolean claimed = false; - if (replyDebitorIndex != NO_DEBITOR_INDEX) + if (replyDebIndex != NO_DEBITOR_INDEX) { final int lengthMax = Math.min(reservedMax - replyPad, remaining); final int deferredMax = remaining - lengthMax; - reserved = replyDebitor.claim(traceId, replyDebitorIndex, replyId, reservedMin, reservedMax, deferredMax); + reserved = replyDeb.claim(traceId, replyDebIndex, replyId, reservedMin, reservedMax, deferredMax); claimed = reserved > 0; } @@ -1375,7 +1375,7 @@ private void doClientReplyDataFull( long latestOffset) { doData(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, flags, replyBudgetId, reserved, value, + traceId, authorization, flags, replyBud, reserved, value, ex -> ex.set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) .typeId(kafkaTypeId) .fetch(f -> f.timestamp(timestamp) @@ -1420,7 +1420,7 @@ private void doClientReplyDataInit( long latestOffset) { doData(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, flags, replyBudgetId, reserved, fragment, + traceId, authorization, flags, replyBud, reserved, fragment, ex -> ex.set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) .typeId(kafkaTypeId) .fetch(f -> f.deferred(deferred) @@ -1451,7 +1451,7 @@ private void doClientReplyDataNone( int flags) { doData(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, flags, replyBudgetId, reserved, fragment, EMPTY_EXTENSION); + traceId, authorization, flags, replyBud, reserved, fragment, EMPTY_EXTENSION); replySeq += reserved; @@ -1473,7 +1473,7 @@ private void 
doClientReplyDataFin( long latestOffset) { doData(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, flags, replyBudgetId, reserved, fragment, + traceId, authorization, flags, replyBud, reserved, fragment, ex -> ex.set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) .typeId(kafkaTypeId) .fetch(f -> f.partition(p -> p.partitionId(partitionId) @@ -1512,7 +1512,7 @@ private void doClientReplyFlush( assert partitionOffset >= cursor.offset : String.format("%d >= %d", partitionOffset, cursor.offset); doFlush(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, replyBudgetId, reserved, ex -> ex + traceId, authorization, replyBud, reserved, ex -> ex .set((b, o, l) -> kafkaFlushExRW.wrap(b, o, l) .typeId(kafkaTypeId) .fetch(f -> f @@ -1546,7 +1546,7 @@ private void doClientReplyFlush( //assert partitionOffset >= cursor.offset : String.format("%d >= %d", partitionOffset, cursor.offset); doFlush(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, replyBudgetId, reserved, ex -> ex + traceId, authorization, replyBud, reserved, ex -> ex .set((b, o, l) -> kafkaFlushExRW.wrap(b, o, l) .typeId(kafkaTypeId) .fetch(f -> f @@ -1622,8 +1622,8 @@ private void onClientReplyWindow( final int padding = window.padding(); final int minimum = window.minimum(); - assert replyBudgetId == 0L || replyBudgetId == budgetId : - String.format("%d == 0 || %d == %d)", replyBudgetId, replyBudgetId, budgetId); + assert replyBud == 0L || replyBud == budgetId : + String.format("%d == 0 || %d == %d)", replyBud, replyBud, budgetId); assert acknowledge <= sequence; assert sequence <= replySeq; @@ -1634,17 +1634,17 @@ private void onClientReplyWindow( this.replyMax = maximum; this.replyPad = padding; this.replyMin = minimum; - this.replyBudgetId = budgetId; + this.replyBud = budgetId; if (!KafkaState.replyOpened(state)) { state = KafkaState.openedReply(state); - if (replyBudgetId != NO_BUDGET_ID 
&& replyDebitorIndex == NO_DEBITOR_INDEX) + if (replyBud != NO_BUDGET_ID && replyDebIndex == NO_DEBITOR_INDEX) { - replyDebitor = supplyDebitor.apply(replyBudgetId); - replyDebitorIndex = replyDebitor.acquire(replyBudgetId, replyId, this::doClientReplyDataIfNecessary); - assert replyDebitorIndex != NO_DEBITOR_INDEX; + replyDeb = supplyDebitor.apply(replyBud); + replyDebIndex = replyDeb.acquire(replyBud, replyId, this::doClientReplyDataIfNecessary); + assert replyDebIndex != NO_DEBITOR_INDEX; } } @@ -1670,10 +1670,10 @@ private void onClientReplyReset( private void cleanupDebitorIfNecessary() { - if (replyDebitor != null && replyDebitorIndex != NO_DEBITOR_INDEX) + if (replyDeb != null && replyDebIndex != NO_DEBITOR_INDEX) { - replyDebitor.release(replyBudgetId, replyId); - replyDebitorIndex = NO_DEBITOR_INDEX; + replyDeb.release(replyBud, replyId); + replyDebIndex = NO_DEBITOR_INDEX; } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFetchFactory.java index ac8f606a3f..74ebdf8244 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFetchFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFetchFactory.java @@ -1249,9 +1249,9 @@ private int decodeFetchRecord( final int minimum = Math.min(maximum, 1024); int valueClaimed = maximum; - if (valueClaimed != 0 && client.stream.replyDebitorIndex != NO_DEBITOR_INDEX) + if (valueClaimed != 0 && client.stream.replyDebIndex != NO_DEBITOR_INDEX) { - valueClaimed = client.stream.replyDebitor.claim(traceId, client.stream.replyDebitorIndex, + valueClaimed = client.stream.replyDeb.claim(traceId, client.stream.replyDebIndex, client.stream.replyId, minimum, maximum, 0); if (valueClaimed == 0) @@ -1371,9 +1371,9 @@ 
private int decodeFetchRecordInit( final int minimum = Math.min(maximum, 1024); int valueClaimed = maximum; - if (valueClaimed != 0 && client.stream.replyDebitorIndex != NO_DEBITOR_INDEX) + if (valueClaimed != 0 && client.stream.replyDebIndex != NO_DEBITOR_INDEX) { - valueClaimed = client.stream.replyDebitor.claim(traceId, client.stream.replyDebitorIndex, + valueClaimed = client.stream.replyDeb.claim(traceId, client.stream.replyDebIndex, client.stream.replyId, minimum, maximum, 0); if (valueClaimed == 0) @@ -1449,9 +1449,9 @@ private int decodeFetchRecordValue( final int minimum = Math.min(maximum, 1024); int valueClaimed = maximum; - if (valueClaimed != 0 && client.stream.replyDebitorIndex != NO_DEBITOR_INDEX) + if (valueClaimed != 0 && client.stream.replyDebIndex != NO_DEBITOR_INDEX) { - valueClaimed = client.stream.replyDebitor.claim(traceId, client.stream.replyDebitorIndex, + valueClaimed = client.stream.replyDeb.claim(traceId, client.stream.replyDebIndex, client.stream.replyId, minimum, maximum, 0); if (valueClaimed == 0) @@ -1729,9 +1729,9 @@ private final class KafkaFetchStream private int replyMax; private int replyPad; - private long replyDebitorId; - private BudgetDebitor replyDebitor; - private long replyDebitorIndex = NO_DEBITOR_INDEX; + private long replyBud; + private BudgetDebitor replyDeb; + private long replyDebIndex = NO_DEBITOR_INDEX; KafkaFetchStream( MessageConsumer application, @@ -1866,14 +1866,14 @@ private void onApplicationWindow( this.replyAck = acknowledge; this.replyMax = maximum; this.replyPad = padding; - this.replyDebitorId = budgetId; + this.replyBud = budgetId; assert replyAck <= replySeq; - if (replyDebitorId != 0L && replyDebitor == null) + if (replyBud != 0L && replyDeb == null) { - replyDebitor = supplyDebitor.apply(replyDebitorId); - replyDebitorIndex = replyDebitor.acquire(replyDebitorId, replyId, client::decodeNetworkIfNecessary); + replyDeb = supplyDebitor.apply(replyBud); + replyDebIndex = replyDeb.acquire(replyBud, 
replyId, client::decodeNetworkIfNecessary); } state = KafkaState.openedReply(state); @@ -1950,7 +1950,7 @@ private void doApplicationData( Flyweight extension) { doData(application, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, flags, replyDebitorId, reserved, payload, extension); + traceId, authorization, flags, replyBud, reserved, payload, extension); replySeq += reserved; @@ -2101,11 +2101,11 @@ private void cleanupApplication( private void cleanupApplicationDebitorIfNecessary() { - if (replyDebitorIndex != NO_DEBITOR_INDEX) + if (replyDebIndex != NO_DEBITOR_INDEX) { - replyDebitor.release(replyDebitorIndex, replyId); - replyDebitorIndex = NO_DEBITOR_INDEX; - replyDebitor = null; + replyDeb.release(replyDebIndex, replyId); + replyDebIndex = NO_DEBITOR_INDEX; + replyDeb = null; } } diff --git a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/stream/SseServerFactory.java b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/stream/SseServerFactory.java index 46bcbf0558..ae4827c9ee 100644 --- a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/stream/SseServerFactory.java +++ b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/stream/SseServerFactory.java @@ -347,8 +347,8 @@ private final class SseServer private long httpReplyBud; private int httpReplyPad; private long httpReplyAuth; - private BudgetDebitor replyDebitor; - private long replyDebitorIndex = NO_DEBITOR_INDEX; + private BudgetDebitor replyDeb; + private long replyDebIndex = NO_DEBITOR_INDEX; private SseServer( MessageConsumer network, @@ -518,13 +518,13 @@ private void onNetWindow( assert httpReplyAck <= httpReplySeq; - if (httpReplyBud != 0L && replyDebitorIndex == NO_DEBITOR_INDEX) + if (httpReplyBud != 0L && replyDebIndex == NO_DEBITOR_INDEX) { - replyDebitor = supplyDebitor.apply(budgetId); - replyDebitorIndex = 
replyDebitor.acquire(budgetId, replyId, this::flushNetwork); + replyDeb = supplyDebitor.apply(budgetId); + replyDebIndex = replyDeb.acquire(budgetId, replyId, this::flushNetwork); } - if (httpReplyBud != 0L && replyDebitorIndex == NO_DEBITOR_INDEX) + if (httpReplyBud != 0L && replyDebIndex == NO_DEBITOR_INDEX) { doNetAbort(traceId, authorization); stream.doAppEndDeferred(traceId, authorization); @@ -617,7 +617,7 @@ else if (name.equals(HEADER_NAME_METHOD)) buffer.putBytes(networkSlotOffset, data.buffer(), data.offset(), data.sizeof()); networkSlotOffset += data.sizeof(); - if (replyDebitorIndex != NO_DEBITOR_INDEX) + if (replyDebIndex != NO_DEBITOR_INDEX) { deferredClaim += data.reserved(); } @@ -784,9 +784,9 @@ private void encodeNetwork( } int claimed = reserved; - if (replyDebitorIndex != NO_DEBITOR_INDEX) + if (replyDebIndex != NO_DEBITOR_INDEX) { - claimed = replyDebitor.claim(traceId, replyDebitorIndex, replyId, + claimed = replyDeb.claim(traceId, replyDebIndex, replyId, reserved, reserved, 0); } @@ -805,9 +805,9 @@ private void encodeNetwork( if (deferredClaim > 0) { - assert replyDebitorIndex != NO_DEBITOR_INDEX; + assert replyDebIndex != NO_DEBITOR_INDEX; - int claimed = replyDebitor.claim(traceId, replyDebitorIndex, replyId, + int claimed = replyDeb.claim(traceId, replyDebIndex, replyId, deferredClaim, deferredClaim, 0); if (claimed == deferredClaim) @@ -851,11 +851,11 @@ private void encodeNetwork( private void cleanupDebitorIfNecessary() { - if (replyDebitorIndex != NO_DEBITOR_INDEX) + if (replyDebIndex != NO_DEBITOR_INDEX) { - replyDebitor.release(replyDebitorIndex, replyId); - replyDebitor = null; - replyDebitorIndex = NO_DEBITOR_INDEX; + replyDeb.release(replyDebIndex, replyId); + replyDeb = null; + replyDebIndex = NO_DEBITOR_INDEX; } } @@ -1100,7 +1100,7 @@ private void onAppEnd( buffer.putBytes(networkSlotOffset, data.buffer(), data.offset(), data.sizeof()); networkSlotOffset += data.sizeof(); - if (replyDebitorIndex != NO_DEBITOR_INDEX) + if 
(replyDebIndex != NO_DEBITOR_INDEX) { deferredClaim += data.reserved(); } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/budget/DefaultBudgetCreditor.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/budget/DefaultBudgetCreditor.java index cc15787d65..07f3a940ad 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/budget/DefaultBudgetCreditor.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/budget/DefaultBudgetCreditor.java @@ -113,6 +113,12 @@ public long acquire( if (budgetIndex != NO_CREDITOR_INDEX) { budgetIndexById.put(budgetId, budgetIndex); + + if (EngineConfiguration.DEBUG_BUDGETS) + { + System.out.format("[%d] acquire creditor budgetId=%d budgetIndex=%d \n", + System.nanoTime(), budgetId, budgetIndex); + } } return budgetIndex; From cd0e6731857d44d53002fed2f1c739eac564e526 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 17:03:23 -0700 Subject: [PATCH 061/115] Bump org.apache.ivy:ivy from 2.5.1 to 2.5.2 in /manager (#377) Bumps org.apache.ivy:ivy from 2.5.1 to 2.5.2. --- updated-dependencies: - dependency-name: org.apache.ivy:ivy dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- manager/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manager/pom.xml b/manager/pom.xml index bbd7fe34da..11bc4d07ab 100644 --- a/manager/pom.xml +++ b/manager/pom.xml @@ -35,7 +35,7 @@ org.apache.ivy ivy - 2.5.1 + 2.5.2 org.sonatype.plexus From e63de589889b0e7901840b114cda7eeb6bb7a120 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Aug 2023 08:44:55 -0700 Subject: [PATCH 062/115] Bump org.codehaus.mojo:exec-maven-plugin from 1.6.0 to 3.1.0 (#370) * Bump org.codehaus.mojo:exec-maven-plugin from 1.6.0 to 3.1.0 Bumps [org.codehaus.mojo:exec-maven-plugin](https://github.com/mojohaus/exec-maven-plugin) from 1.6.0 to 3.1.0. - [Release notes](https://github.com/mojohaus/exec-maven-plugin/releases) - [Commits](https://github.com/mojohaus/exec-maven-plugin/compare/exec-maven-plugin-1.6.0...exec-maven-plugin-3.1.0) --- updated-dependencies: - dependency-name: org.codehaus.mojo:exec-maven-plugin dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Use exec-maven-plugin version from dependencyManagement --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: John Fallows --- cloud/docker-image/pom.xml | 1 - pom.xml | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/docker-image/pom.xml b/cloud/docker-image/pom.xml index 3b6fdbd3fe..cf0618c24d 100644 --- a/cloud/docker-image/pom.xml +++ b/cloud/docker-image/pom.xml @@ -212,7 +212,6 @@ org.codehaus.mojo exec-maven-plugin - 3.0.0 ${project.groupId} diff --git a/pom.xml b/pom.xml index 802477b5d1..c6b4dad4f1 100644 --- a/pom.xml +++ b/pom.xml @@ -358,7 +358,7 @@ org.codehaus.mojo exec-maven-plugin - 1.6.0 + 3.1.0 org.moditect From c7a633f1e5c54d0f8e24e72a56599a97172b3ef4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Aug 2023 09:31:26 -0700 Subject: [PATCH 063/115] Bump junit:junit from 4.13.1 to 4.13.2 (#365) Bumps [junit:junit](https://github.com/junit-team/junit4) from 4.13.1 to 4.13.2. - [Release notes](https://github.com/junit-team/junit4/releases) - [Changelog](https://github.com/junit-team/junit4/blob/main/doc/ReleaseNotes4.13.1.md) - [Commits](https://github.com/junit-team/junit4/compare/r4.13.1...r4.13.2) --- updated-dependencies: - dependency-name: junit:junit dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index c6b4dad4f1..8374dc08cc 100644 --- a/pom.xml +++ b/pom.xml @@ -100,7 +100,7 @@ junit junit - 4.13.1 + 4.13.2 org.hamcrest @@ -483,7 +483,7 @@ junit junit - 4.13.1 + 4.13.2 From a994a516106bbf19e816e3d97ed14eb48ae9ffde Mon Sep 17 00:00:00 2001 From: bmaidics Date: Thu, 24 Aug 2023 23:51:34 +0200 Subject: [PATCH 064/115] Send will message as data frame + reject large packets (#363) --- .../client.rpt | 2 + .../server.rpt | 2 + .../session.client.sent.reset/client.rpt | 1 + .../session.client.sent.reset/server.rpt | 1 + .../kafka/session.client.takeover/client.rpt | 5 + .../kafka/session.client.takeover/server.rpt | 5 + .../session.exists.clean.start/client.rpt | 5 + .../session.exists.clean.start/server.rpt | 5 + .../client.rpt | 1 + .../server.rpt | 1 + .../session.server.sent.reset/client.rpt | 1 + .../session.server.sent.reset/server.rpt | 1 + .../client.rpt | 1 + .../server.rpt | 1 + .../kafka/session.subscribe/client.rpt | 1 + .../kafka/session.subscribe/server.rpt | 1 + .../client.rpt | 1 + .../server.rpt | 1 + .../client.rpt | 1 + .../server.rpt | 1 + .../client.rpt | 4 +- .../server.rpt | 3 +- .../mqtt/session.client.sent.reset/client.rpt | 2 +- .../mqtt/session.client.sent.reset/server.rpt | 2 +- .../mqtt/session.client.takeover/client.rpt | 4 +- .../mqtt/session.client.takeover/server.rpt | 2 + .../session.exists.clean.start/client.rpt | 4 +- .../session.exists.clean.start/server.rpt | 2 + .../mqtt/session.server.sent.reset/client.rpt | 2 +- .../mqtt/session.server.sent.reset/server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 1 + .../streams/mqtt/session.subscribe/client.rpt | 2 +- .../streams/mqtt/session.subscribe/server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- 
.../stream/MqttKafkaSessionFactory.java | 4 +- .../binding/mqtt/internal/MqttFunctions.java | 400 ++++-------------- .../main/resources/META-INF/zilla/mqtt.idl | 34 +- .../client.rpt | 7 + .../server.rpt | 7 + .../session.client.takeover/client.rpt | 7 + .../session.client.takeover/server.rpt | 7 + .../client.rpt | 2 +- .../server.rpt | 2 +- .../session.exists.clean.start/client.rpt | 8 + .../session.exists.clean.start/server.rpt | 8 + .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 14 + .../server.rpt | 14 + .../application/session.subscribe/client.rpt | 7 + .../application/session.subscribe/server.rpt | 7 + .../client.rpt | 14 + .../server.rpt | 14 + .../client.rpt | 14 + .../server.rpt | 14 + .../client.rpt | 7 + .../server.rpt | 7 + .../client.rpt | 28 +- .../server.rpt | 28 +- .../client.rpt | 43 -- .../server.rpt | 46 -- .../client.rpt | 25 +- .../server.rpt | 25 +- .../session.will.message.retain/client.rpt | 21 +- .../session.will.message.retain/server.rpt | 21 +- .../network/client.sent.abort/client.rpt | 5 +- .../network/client.sent.abort/server.rpt | 5 +- .../network/client.sent.close/client.rpt | 5 +- .../network/client.sent.close/server.rpt | 5 +- .../network/client.sent.reset/client.rpt | 5 +- .../network/client.sent.reset/server.rpt | 5 +- .../client.rpt | 22 +- .../server.rpt | 26 +- .../client.rpt | 25 +- .../server.rpt | 21 +- .../network/connect.maximum.qos.0/client.rpt | 5 +- .../network/connect.maximum.qos.0/server.rpt | 5 +- .../client.rpt | 9 +- .../server.rpt | 9 +- .../client.rpt | 42 ++ .../server.rpt | 43 ++ .../connect.reject.second.connect/client.rpt | 7 +- .../connect.reject.second.connect/server.rpt | 9 +- .../connect.retain.not.supported/client.rpt | 6 +- .../connect.retain.not.supported/server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../connect.subscribe.unfragmented/client.rpt | 6 +- 
.../connect.subscribe.unfragmented/server.rpt | 17 +- .../connect.successful.fragmented/client.rpt | 5 +- .../connect.successful.fragmented/server.rpt | 23 +- .../network/connect.successful/client.rpt | 5 +- .../network/connect.successful/server.rpt | 23 +- .../client.rpt | 11 +- .../server.rpt | 9 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 9 +- .../server.rpt | 9 +- .../streams/network/disconnect/client.rpt | 5 +- .../streams/network/disconnect/server.rpt | 5 +- .../network/ping.keep.alive/client.rpt | 51 +-- .../network/ping.keep.alive/server.rpt | 51 +-- .../mqtt/streams/network/ping/client.rpt | 5 +- .../mqtt/streams/network/ping/server.rpt | 5 +- .../network/publish.empty.message/client.rpt | 5 +- .../network/publish.empty.message/server.rpt | 5 +- .../publish.empty.retained.message/client.rpt | 5 +- .../publish.empty.retained.message/server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 7 +- .../server.rpt | 7 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 10 +- .../server.rpt | 10 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../publish.multiple.messages/client.rpt | 5 +- .../publish.multiple.messages/server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 25 +- .../network/publish.one.message/client.rpt | 5 +- .../network/publish.one.message/server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 54 +++ .../server.rpt | 55 +++ .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- 
.../server.rpt | 5 +- .../network/publish.retained/client.rpt | 5 +- .../network/publish.retained/server.rpt | 5 +- .../publish.topic.not.routed/client.rpt | 5 +- .../publish.topic.not.routed/server.rpt | 5 +- .../client.rpt | 41 +- .../server.rpt | 43 +- .../client.rpt | 41 +- .../server.rpt | 43 +- .../publish.with.user.property/client.rpt | 35 +- .../publish.with.user.property/server.rpt | 37 +- .../client.rpt | 30 +- .../server.rpt | 44 +- .../session.client.takeover/client.rpt | 31 +- .../session.client.takeover/server.rpt | 22 +- .../client.rpt | 40 ++ .../server.rpt | 41 ++ .../client.rpt | 26 +- .../server.rpt | 26 +- .../session.exists.clean.start/client.rpt | 20 +- .../session.exists.clean.start/server.rpt | 52 +-- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 73 ++-- .../client.rpt | 5 +- .../server.rpt | 23 +- .../network/session.subscribe/client.rpt | 5 +- .../network/session.subscribe/server.rpt | 9 +- .../client.rpt | 79 ++-- .../server.rpt | 55 +-- .../client.rpt | 9 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../session.will.message.retain/client.rpt | 6 +- .../session.will.message.retain/server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 9 +- .../server.rpt | 5 +- .../subscribe.invalid.topic.filter/client.rpt | 9 +- .../subscribe.invalid.topic.filter/server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../network/subscribe.one.message/client.rpt | 5 +- .../network/subscribe.one.message/server.rpt | 5 +- .../subscribe.publish.no.local/client.rpt | 5 +- .../subscribe.publish.no.local/server.rpt | 5 +- .../client.rpt | 10 +- .../server.rpt | 10 +- .../client.rpt | 10 +- .../server.rpt | 12 +- .../client.rpt | 5 +- .../server.rpt | 5 +- 
.../client.rpt | 5 +- .../server.rpt | 5 +- .../subscribe.receive.message/client.rpt | 5 +- .../subscribe.receive.message/server.rpt | 7 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../subscribe.reject.no.local/client.rpt | 5 +- .../subscribe.reject.no.local/server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 23 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../subscribe.retain.as.published/client.rpt | 5 +- .../subscribe.retain.as.published/server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 31 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 23 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 47 +- .../unsubscribe.after.subscribe/client.rpt | 9 +- .../unsubscribe.after.subscribe/server.rpt | 5 +- .../client.rpt | 9 +- .../server.rpt | 5 +- .../client.rpt | 10 +- .../server.rpt | 6 +- .../client.rpt | 9 +- .../server.rpt | 6 +- .../client.rpt | 9 +- .../server.rpt | 5 +- .../client.rpt | 9 +- .../server.rpt | 5 +- .../client.rpt | 9 +- .../server.rpt | 5 +- .../client.rpt | 9 +- .../server.rpt | 5 +- .../client.rpt | 9 +- .../server.rpt | 5 +- .../mqtt/internal/MqttFunctionsTest.java | 220 +++++----- .../mqtt/streams/application/SessionIT.java | 15 +- .../mqtt/streams/network/ConnectionIT.java | 9 + .../mqtt/streams/network/PublishIT.java | 9 + .../mqtt/internal/MqttReasonCodes.java | 1 + .../internal/stream/MqttServerFactory.java | 313 +++++++++----- .../mqtt/internal/stream/ConnectionIT.java | 16 + 
.../mqtt/internal/stream/PublishIT.java | 16 + .../mqtt/internal/stream/SessionIT.java | 19 +- .../command/log/internal/LoggableStream.java | 45 +- 301 files changed, 2286 insertions(+), 1750 deletions(-) rename incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/{session.will.message.disconnect.with.will.message => session.will.message.abort}/client.rpt (66%) rename incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/{session.will.message.disconnect.with.will.message => session.will.message.abort}/server.rpt (67%) delete mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.no.ping.within.keep.alive/client.rpt delete mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.no.ping.within.keep.alive/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt index cc1a004b2b..5104bd0301 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt @@ -37,6 +37,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -184,6 +185,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt index 5023162a29..72b69fe1d9 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt @@ -39,6 +39,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -170,6 +171,7 @@ read zilla:data.ext 
${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt index b17bc2c18a..c9593137f9 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt @@ -37,6 +37,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt index 9761a45495..dc43780c91 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt @@ -39,6 +39,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt index 72f713d0a6..398eb8c11c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt @@ -37,6 +37,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -146,6 +147,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -157,6 +159,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -220,6 +223,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -234,6 +238,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt index 16e4a117a9..1c0f47f779 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt @@ -39,6 +39,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -146,6 +147,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -159,6 +161,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -212,6 +215,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -228,6 +232,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt index d7238700bd..47d321a2c2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt @@ -37,6 +37,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -146,6 +147,7 @@ read zilla:data.ext 
${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -159,6 +161,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -221,6 +224,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -233,6 +237,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt index 12fd62832f..116f1da2d8 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt @@ -39,6 +39,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -146,6 +147,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -160,6 +162,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -215,6 +218,7 @@ read zilla:data.ext 
${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} @@ -229,6 +233,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt index 74a4d2c97d..232d3145da 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt @@ -37,6 +37,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt index 1f89f2850f..a8decf49d2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt @@ -39,6 +39,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + 
.hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt index 582dbc7019..0f6ef624fc 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt @@ -37,6 +37,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt index d21303343b..7e5a10c960 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt @@ -39,6 +39,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt index 0a954ff579..974063e3b7 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt @@ -37,6 +37,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt index 9db846d27a..4287d0b897 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt @@ -39,6 +39,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt index cec9c6a266..c72a043fc6 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt @@ -37,6 +37,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt index eddaecfc04..41577d3aef 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt @@ -39,6 +39,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt index b5ef64ebdc..868fe4a5c5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt @@ -37,6 +37,7 @@ write zilla:data.ext 
${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt index c1e0f2eddb..d8a4504d4d 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt @@ -39,6 +39,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt index de5e39f5b4..55efc89d01 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt @@ -37,6 +37,7 @@ write zilla:data.ext ${kafka:dataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt index 2b4dc1b281..9cf6f4f03e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt @@ -39,6 +39,7 @@ read zilla:data.ext ${kafka:matchDataEx() .deferred(0) .partition(-1, -1) .key("client-1#migrate") + .hashKey("client-1") .header("sender-id", "sender-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt index 8429c220cc..58c446c456 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt @@ -20,8 +20,8 @@ connect "zilla://streams/mqtt0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} @@ -70,8 +70,8 @@ connect await SESSION1_ABORTED write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt index ad9ee2ce1f..1505e82923 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt @@ -23,8 +23,8 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} @@ -66,6 +66,7 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() + .expiry(1) .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt index a225126b73..628c0114fe 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt @@ -20,8 +20,8 @@ connect "zilla://streams/mqtt0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt index be2069e676..7afd1331b1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt @@ -22,8 +22,8 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt index 5a8557316e..5e04d0a12f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt @@ -20,8 +20,8 @@ connect "zilla://streams/mqtt0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} @@ -68,8 +68,8 @@ connect await CONNECTED write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt index b99a7bfec5..d4e46af36c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt @@ -22,6 +22,7 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() + .expiry(1) .clientId("client-1") .build() .build()} @@ -66,6 +67,7 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() + .expiry(1) .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt index ac9a831686..d5d7c03eec 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt @@ -20,8 +20,8 @@ connect "zilla://streams/mqtt0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} @@ -73,8 +73,8 @@ connect await SESSION_READY write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt index 496822dd17..f79622e0db 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt @@ -22,6 +22,7 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() + .expiry(1) .clientId("client-1") .build() .build()} @@ -63,6 +64,7 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() + .expiry(1) .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt index c3f002620c..d45c693511 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt @@ -20,8 +20,8 @@ connect "zilla://streams/mqtt0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt index 1802b2d422..3a13bf2953 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt @@ -22,8 +22,8 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt index 640f139646..64a0c9dfe3 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt @@ -20,8 +20,8 @@ connect "zilla://streams/mqtt0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt index 15212eda0a..5a6717a9be 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt @@ -22,6 +22,7 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() + .expiry(1) .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt index a16768f84d..1919464e2e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt @@ -20,8 +20,8 @@ connect "zilla://streams/mqtt0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt index a75a7dff44..dc487358ef 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt @@ -22,8 +22,8 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) 
.session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt index 7195298048..1d9b28e1ab 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt @@ -20,8 +20,8 @@ connect "zilla://streams/mqtt0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt index a15b6da914..9a66280e9e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt @@ -23,8 +23,8 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt index 21507a821f..936ce63059 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt @@ -20,8 +20,8 @@ connect "zilla://streams/mqtt0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt index b62e481f0d..0d80c4b9a7 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt @@ -23,8 +23,8 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("client-1") .expiry(1) + .clientId("client-1") .build() .build()} diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java 
b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java index 4b38221c86..dc11a6bb44 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java @@ -311,7 +311,7 @@ private void onMqttData( .timestamp(now().toEpochMilli()) .partition(p -> p.partitionId(-1).partitionOffset(-1)) .key(b -> b.length(clientId.length()) - .value(clientId.value(), 0, clientId.length()))) + .value(clientId.value(), 0, clientId.length()))) .build(); if (sessionState != null) @@ -797,6 +797,8 @@ protected void sendMigrateSignal(long authorization, long traceId) .partition(p -> p.partitionId(-1).partitionOffset(-1)) .key(b -> b.length(delegate.clientIdMigrate.length()) .value(delegate.clientIdMigrate.value(), 0, delegate.clientIdMigrate.length())) + .hashKey(b -> b.length(delegate.clientId.length()) + .value(delegate.clientId.value(), 0, delegate.clientId.length())) .headersItem(c -> c.nameLen(SENDER_ID_NAME.length()) .name(SENDER_ID_NAME.value(), 0, SENDER_ID_NAME.length()) .valueLen(delegate.sessionId.length()) diff --git a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java index 697c73aa40..b1ef279da1 100644 --- a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java +++ b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java @@ -19,6 +19,7 @@ import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.concurrent.ThreadLocalRandom; import java.util.function.Predicate; import org.agrona.DirectBuffer; @@ -30,12 
+31,12 @@ import io.aklivity.zilla.specs.binding.mqtt.internal.types.Array32FW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttBinaryFW; -import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttEndReasonCode; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttMessageFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttPayloadFormat; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttPayloadFormatFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttPublishFlags; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttQoS; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSessionFlags; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSessionStateFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSubscribeFlags; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttTopicFilterFW; @@ -44,13 +45,14 @@ import io.aklivity.zilla.specs.binding.mqtt.internal.types.Varuint32FW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttDataExFW; -import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttEndExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttExtensionKind; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttFlushExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttPublishBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttPublishDataExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttResetExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSessionBeginExFW; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSessionDataExFW; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSessionDataKind; import 
io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSubscribeBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSubscribeDataExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSubscribeFlushExFW; @@ -99,12 +101,6 @@ public static MqttFlushExBuilder flushEx() return new MqttFlushExBuilder(); } - @Function - public static MqttEndExBuilder endEx() - { - return new MqttEndExBuilder(); - } - @Function public static MqttResetExBuilder resetEx() { @@ -117,6 +113,24 @@ public static MqttSessionStateBuilder session() return new MqttSessionStateBuilder(); } + @Function + public static MqttWillMessageBuilder will() + { + return new MqttWillMessageBuilder(); + } + + @Function + public static byte[] randomBytes( + int length) + { + byte[] bytes = new byte[length]; + for (int i = 0; i < length; i++) + { + bytes[i] = (byte) ThreadLocalRandom.current().nextInt(0x100); + } + return bytes; + } + public static final class MqttBeginExBuilder { private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); @@ -190,30 +204,29 @@ public MqttSessionBeginExBuilder expiry( return this; } - public MqttSessionBeginExBuilder serverReference( - String serverReference) + public MqttSessionBeginExBuilder serverRef( + String serverRef) { - sessionBeginExRW.serverReference(serverReference); + sessionBeginExRW.serverRef(serverRef); return this; } - public MqttWillMessageBuilder will() + public MqttSessionBeginExBuilder flags( + String... 
flagNames) { - return new MqttWillMessageBuilder(this); + int flags = Arrays.stream(flagNames) + .mapToInt(flag -> 1 << MqttSessionFlags.valueOf(flag).value()) + .reduce(0, (a, b) -> a | b); + sessionBeginExRW.flags(flags); + return this; } public MqttBeginExBuilder build() { - final MqttSessionBeginExFW subscribeBeginEx = sessionBeginExRW.build(); - beginExRO.wrap(writeBuffer, 0, subscribeBeginEx.limit()); + final MqttSessionBeginExFW sessionBeginEx = sessionBeginExRW.build(); + beginExRO.wrap(writeBuffer, 0, sessionBeginEx.limit()); return MqttBeginExBuilder.this; } - - private void willMessage( - MqttMessageFW willMessage) - { - sessionBeginExRW.will(willMessage); - } } public final class MqttSubscribeBeginExBuilder @@ -345,6 +358,13 @@ public MqttDataExBuilder.MqttPublishDataExBuilder publish() return new MqttDataExBuilder.MqttPublishDataExBuilder(); } + public MqttDataExBuilder.MqttSessionDataExBuilder session() + { + dataExRW.kind(MqttExtensionKind.SESSION.value()); + + return new MqttDataExBuilder.MqttSessionDataExBuilder(); + } + public byte[] build() { final MqttDataExFW dataEx = dataExRO; @@ -530,8 +550,32 @@ public MqttPublishDataExBuilder userProperty( public MqttDataExBuilder build() { - final MqttPublishDataExFW publishBeginEx = publishDataExRW.build(); - dataExRO.wrap(writeBuffer, 0, publishBeginEx.limit()); + final MqttPublishDataExFW publishDataEx = publishDataExRW.build(); + dataExRO.wrap(writeBuffer, 0, publishDataEx.limit()); + return MqttDataExBuilder.this; + } + } + + public final class MqttSessionDataExBuilder + { + private final MqttSessionDataExFW.Builder sessionDataExRW = new MqttSessionDataExFW.Builder(); + + private MqttSessionDataExBuilder() + { + sessionDataExRW.wrap(writeBuffer, MqttBeginExFW.FIELD_OFFSET_SESSION, writeBuffer.capacity()); + } + + public MqttSessionDataExBuilder kind( + String kind) + { + sessionDataExRW.kind(k -> k.set(MqttSessionDataKind.valueOf(kind))); + return this; + } + + public MqttDataExBuilder build() + { + 
final MqttSessionDataExFW sessionDataEx = sessionDataExRW.build(); + dataExRO.wrap(writeBuffer, 0, sessionDataEx.limit()); return MqttDataExBuilder.this; } } @@ -612,39 +656,6 @@ public byte[] build() } } - public static final class MqttEndExBuilder - { - private final MqttEndExFW.Builder endExRW; - - private MqttEndExBuilder() - { - MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); - this.endExRW = new MqttEndExFW.Builder().wrap(writeBuffer, 0, writeBuffer.capacity()); - } - - public MqttEndExBuilder typeId( - int typeId) - { - endExRW.typeId(typeId); - return this; - } - - public MqttEndExBuilder reason( - String reason) - { - endExRW.reasonCode(r -> r.set(MqttEndReasonCode.valueOf(reason))); - return this; - } - - public byte[] build() - { - final MqttEndExFW endEx = endExRW.build(); - final byte[] array = new byte[endEx.sizeof()]; - endEx.buffer().getBytes(endEx.offset(), array); - return array; - } - } - public static final class MqttResetExBuilder { private final MqttResetExFW.Builder resetExRW; @@ -662,10 +673,10 @@ public MqttResetExBuilder typeId( return this; } - public MqttResetExBuilder serverReference( - String serverReference) + public MqttResetExBuilder serverRef( + String serverRef) { - resetExRW.serverReference(serverReference); + resetExRW.serverRef(serverRef); return this; } @@ -729,11 +740,9 @@ public byte[] build() public static final class MqttWillMessageBuilder { private final MqttMessageFW.Builder willMessageRW = new MqttMessageFW.Builder(); - private final MqttBeginExBuilder.MqttSessionBeginExBuilder beginExBuilder; - private MqttWillMessageBuilder(MqttBeginExBuilder.MqttSessionBeginExBuilder beginExBuilder) + private MqttWillMessageBuilder() { - this.beginExBuilder = beginExBuilder; MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); willMessageRW.wrap(writeBuffer, 0, writeBuffer.capacity()); } @@ -833,10 +842,12 @@ public MqttWillMessageBuilder payloadBytes( return this; } - public 
MqttBeginExBuilder.MqttSessionBeginExBuilder build() + public byte[] build() { - beginExBuilder.willMessage(willMessageRW.build()); - return beginExBuilder; + final MqttMessageFW willMessage = willMessageRW.build(); + final byte[] array = new byte[willMessage.sizeof()]; + willMessage.buffer().getBytes(willMessage.offset(), array); + return array; } } @@ -1093,9 +1104,9 @@ private boolean matchFilters( public final class MqttSessionBeginExMatcherBuilder { private String16FW clientId; - private String16FW serverReference; + private String16FW serverRef; private Integer expiry; - private MqttWillMessageMatcherBuilder willMessageMatcher; + private Integer flags; private MqttSessionBeginExMatcherBuilder() { @@ -1115,17 +1126,20 @@ public MqttSessionBeginExMatcherBuilder expiry( return this; } - public MqttSessionBeginExMatcherBuilder serverReference( - String serverReference) + public MqttSessionBeginExMatcherBuilder serverRef( + String serverRef) { - this.serverReference = new String16FW(serverReference); + this.serverRef = new String16FW(serverRef); return this; } - public MqttWillMessageMatcherBuilder will() + public MqttSessionBeginExMatcherBuilder flags( + String... 
flagNames) { - this.willMessageMatcher = new MqttWillMessageMatcherBuilder(); - return willMessageMatcher; + this.flags = Arrays.stream(flagNames) + .mapToInt(flag -> 1 << MqttSessionFlags.valueOf(flag).value()) + .reduce(0, (a, b) -> a | b); + return this; } public MqttBeginExMatcherBuilder build() @@ -1137,11 +1151,10 @@ private boolean match( MqttBeginExFW beginEx) { final MqttSessionBeginExFW sessionBeginEx = beginEx.session(); - final MqttMessageFW willMessage = beginEx.session().will(); return matchClientId(sessionBeginEx) && matchExpiry(sessionBeginEx) && - matchServerReference(sessionBeginEx) && - (willMessageMatcher == null || willMessageMatcher.match(willMessage)); + matchFlags(sessionBeginEx) && + matchserverRef(sessionBeginEx); } private boolean matchClientId( @@ -1156,241 +1169,16 @@ private boolean matchExpiry( return expiry == null || expiry == sessionBeginEx.expiry(); } - private boolean matchServerReference( + private boolean matchFlags( final MqttSessionBeginExFW sessionBeginEx) { - return serverReference == null || serverReference.equals(sessionBeginEx.serverReference()); + return flags == null || flags == sessionBeginEx.flags(); } - public final class MqttWillMessageMatcherBuilder + private boolean matchserverRef( + final MqttSessionBeginExFW sessionBeginEx) { - private MqttBinaryFW.Builder correlationRW; - private final DirectBuffer correlationRO = new UnsafeBuffer(0, 0); - private MqttBinaryFW.Builder payloadRW; - private final DirectBuffer payloadRO = new UnsafeBuffer(0, 0); - private String16FW topic; - private Integer delay; - private Integer qos; - private Integer flags; - private Integer expiryInterval = -1; - private String16FW contentType; - private MqttPayloadFormatFW format; - private String16FW responseTopic; - private Array32FW.Builder userPropertiesRW; - - private MqttWillMessageMatcherBuilder() - { - } - - public MqttWillMessageMatcherBuilder topic( - String topic) - { - this.topic = new String16FW(topic); - return this; - } - - 
public MqttWillMessageMatcherBuilder delay( - int delay) - { - this.delay = delay; - return this; - } - - public MqttWillMessageMatcherBuilder qos( - String qos) - { - this.qos = MqttQoS.valueOf(qos).ordinal(); - return this; - } - - public MqttWillMessageMatcherBuilder flags( - String... flagNames) - { - int flags = Arrays.stream(flagNames) - .mapToInt(flag -> 1 << MqttPublishFlags.valueOf(flag).ordinal()) - .reduce(0, (a, b) -> a | b); - this.flags = flags; - return this; - } - - public MqttWillMessageMatcherBuilder expiryInterval( - int expiryInterval) - { - this.expiryInterval = expiryInterval; - return this; - } - - public MqttWillMessageMatcherBuilder contentType( - String contentType) - { - this.contentType = new String16FW(contentType); - return this; - } - - public MqttWillMessageMatcherBuilder format( - String format) - { - MqttPayloadFormatFW.Builder builder = - new MqttPayloadFormatFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); - this.format = builder.set(MqttPayloadFormat.valueOf(format)).build(); - return this; - } - - public MqttWillMessageMatcherBuilder responseTopic( - String topic) - { - this.responseTopic = new String16FW(topic); - return this; - } - - public MqttWillMessageMatcherBuilder correlation( - String correlation) - { - assert correlationRW == null; - correlationRW = new MqttBinaryFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); - - correlationRO.wrap(correlation.getBytes(UTF_8)); - correlationRW.bytes(correlationRO, 0, correlationRO.capacity()); - - return this; - } - - public MqttWillMessageMatcherBuilder correlationBytes( - byte[] correlation) - { - assert correlationRW == null; - correlationRW = new MqttBinaryFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); - - correlationRO.wrap(correlation); - correlationRW.bytes(correlationRO, 0, correlationRO.capacity()); - - return this; - } - - public MqttWillMessageMatcherBuilder userProperty( - String name, - String value) - { - if (userPropertiesRW == 
null) - { - this.userPropertiesRW = - new Array32FW.Builder<>(new MqttUserPropertyFW.Builder(), new MqttUserPropertyFW()) - .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); - } - userPropertiesRW.item(p -> p.key(name).value(value)); - return this; - } - - public MqttWillMessageMatcherBuilder payload( - String payload) - { - assert payloadRW == null; - payloadRW = new MqttBinaryFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); - - payloadRO.wrap(payload.getBytes(UTF_8)); - payloadRW.bytes(payloadRO, 0, payloadRO.capacity()); - - return this; - } - - public MqttWillMessageMatcherBuilder payloadBytes( - byte[] payload) - { - assert payloadRW == null; - payloadRW = new MqttBinaryFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); - - payloadRO.wrap(payload); - payloadRW.bytes(payloadRO, 0, payloadRO.capacity()); - - return this; - } - - public MqttSessionBeginExMatcherBuilder build() - { - return MqttSessionBeginExMatcherBuilder.this; - } - - private boolean match( - MqttMessageFW willMessage) - { - return matchTopic(willMessage) && - matchDelay(willMessage) && - matchQos(willMessage) && - matchFlags(willMessage) && - matchExpiryInterval(willMessage) && - matchContentType(willMessage) && - matchFormat(willMessage) && - matchResponseTopic(willMessage) && - matchCorrelation(willMessage) && - matchUserProperties(willMessage) && - matchPayload(willMessage); - } - - private boolean matchTopic( - final MqttMessageFW willMessage) - { - return topic == null || topic.equals(willMessage.topic()); - } - - private boolean matchDelay( - final MqttMessageFW willMessage) - { - return delay == null || delay == willMessage.delay(); - } - - private boolean matchQos( - final MqttMessageFW willMessage) - { - return qos == null || qos == willMessage.qos(); - } - - private boolean matchFlags( - final MqttMessageFW willMessage) - { - return flags == null || flags == willMessage.flags(); - } - - private boolean matchExpiryInterval( - final MqttMessageFW willMessage) - { 
- return expiryInterval == null || expiryInterval == willMessage.expiryInterval(); - } - - private boolean matchContentType( - final MqttMessageFW willMessage) - { - return contentType == null || contentType.equals(willMessage.contentType()); - } - - private boolean matchFormat( - final MqttMessageFW willMessage) - { - return format == null || format.equals(willMessage.format()); - } - - private boolean matchResponseTopic( - final MqttMessageFW willMessage) - { - return responseTopic == null || responseTopic.equals(willMessage.responseTopic()); - } - - private boolean matchCorrelation( - final MqttMessageFW willMessage) - { - return correlationRW == null || correlationRW.build().equals(willMessage.correlation()); - } - - private boolean matchUserProperties( - final MqttMessageFW willMessage) - { - return userPropertiesRW == null || userPropertiesRW.build().equals(willMessage.properties()); - } - - private boolean matchPayload( - final MqttMessageFW willMessage) - { - return payloadRW == null || payloadRW.build().equals(willMessage.payload()); - } + return serverRef == null || serverRef.equals(sessionBeginEx.serverRef()); } } } diff --git a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl index b2e51a0efb..7265b549b3 100644 --- a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl +++ b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl @@ -15,6 +15,12 @@ */ scope mqtt { + enum MqttSessionFlags (uint8) + { + CLEAN_START(1), + WILL(2) + } + enum MqttSubscribeFlags (uint8) { SEND_RETAINED(0), @@ -42,13 +48,6 @@ scope mqtt SESSION(3) } - enum MqttEndReasonCode (uint8) - { - DISCONNECT(0), - KEEP_ALIVE_EXPIRY(1), - DISCONNECT_WITH_WILL(2) - } - enum MqttPayloadFormat { BINARY, @@ -114,10 +113,10 @@ scope mqtt struct MqttSessionBeginEx { - string16 clientId; + uint8 flags = 0; int32 expiry = 0; - string16 serverReference = null; - 
MqttMessage will; + string16 clientId; + string16 serverRef = null; } struct MqttSubscribeBeginEx @@ -137,6 +136,7 @@ scope mqtt { case 0: mqtt::stream::MqttPublishDataEx publish; case 1: mqtt::stream::MqttSubscribeDataEx subscribe; + case 2: mqtt::stream::MqttSessionDataEx session; } struct MqttSubscribeDataEx @@ -167,14 +167,20 @@ scope mqtt MqttUserProperty[] properties; } - struct MqttResetEx extends core::stream::Extension + enum MqttSessionDataKind { - string16 serverReference = null; + STATE, + WILL } - struct MqttEndEx extends core::stream::Extension + struct MqttSessionDataEx + { + MqttSessionDataKind kind; + } + + struct MqttResetEx extends core::stream::Extension { - MqttEndReasonCode reasonCode = DISCONNECT; + string16 serverRef = null; } union MqttFlushEx switch (uint8) extends core::stream::Extension diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt index 72c682e759..ca638f0b15 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt @@ -29,6 +29,13 @@ connected read zilla:data.empty +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt index 8a8d7a410a..93307a50fe 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt @@ -32,6 +32,13 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + read ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt index 77f2824ab1..0b68e3386c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt @@ -27,6 +27,13 @@ write zilla:begin.ext ${mqtt:beginEx() connected +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt index 42a990e7c1..3f6dfd1e96 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt @@ -32,6 +32,13 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + read ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt index 7c06e2e3a0..28f707919e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt @@ -21,8 +21,8 @@ connect "zilla://streams/app0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("one") .expiry(1) + .clientId("one") .build() .build()} connected diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt index 113f780150..e4da874660 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt @@ -23,8 +23,8 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() - .clientId("one") .expiry(1) + .clientId("one") .build() .build()} connected diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt index 1e1c2aed10..7bf3e9eb1d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt @@ -29,6 +29,13 @@ connected read zilla:data.empty +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write ${mqtt:session() .subscription("sensor/one", 1) .build()} @@ -72,6 +79,7 @@ connect await SESSION_READY write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() + .flags("CLEAN_START") .clientId("client") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt index 07a9968ea5..4161f89687 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt @@ -32,6 +32,13 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + read ${mqtt:session() .subscription("sensor/one", 1) .build()} @@ -66,6 +73,7 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() + .flags("CLEAN_START") .clientId("client") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt index e71aa48366..2e7698520a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client") - .serverReference("localhost:1883") + .serverRef("localhost:1883") .build() .build()} @@ -32,7 +32,7 @@ read zilla:data.empty read zilla:reset.ext ${mqtt:resetEx() .typeId(zilla:id("mqtt")) - .serverReference("localhost:1884") + .serverRef("localhost:1884") .build()} write aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt index d74bfdc289..41c25ac043 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client") - .serverReference("localhost:1883") + .serverRef("localhost:1883") .build() .build()} @@ -35,7 +35,7 @@ write flush write zilla:reset.ext ${mqtt:resetEx() .typeId(zilla:id("mqtt")) - .serverReference("localhost:1884") + .serverRef("localhost:1884") .build()} read abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt index 9b849f7b88..421ed4976a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client") - .serverReference("localhost:1883") + .serverRef("localhost:1883") .build() .build()} @@ -30,7 +30,7 @@ connected read zilla:reset.ext ${mqtt:resetEx() .typeId(zilla:id("mqtt")) - .serverReference("localhost:1884") + .serverRef("localhost:1884") .build()} write aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt index c80cf2a278..6e8b43130e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client") - .serverReference("localhost:1883") + .serverRef("localhost:1883") .build() .build()} @@ -32,7 +32,7 @@ connected write zilla:reset.ext ${mqtt:resetEx() .typeId(zilla:id("mqtt")) - .serverReference("localhost:1884") + .serverRef("localhost:1884") .build()} read abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt index 22d2e6529e..9ea94267e0 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt @@ -29,6 +29,13 @@ connected read zilla:data.empty +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write ${mqtt:session() .subscription("sensor/one", 1) .build()} @@ -38,6 +45,13 @@ read ${mqtt:session() .subscription("sensor/one", 1) .build()} +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write 
${mqtt:session() .subscription("sensor/one", 1) .subscription("sensor/two", 2) diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt index 1f43fea32d..0e04154821 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt @@ -32,6 +32,13 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + read ${mqtt:session() .subscription("sensor/one", 1) .build()} @@ -41,6 +48,13 @@ write ${mqtt:session() .build()} write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + read ${mqtt:session() .subscription("sensor/one", 1) .subscription("sensor/two", 2) diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt index bf293f83a1..578940e7d5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt @@ -29,6 +29,13 @@ connected read zilla:data.empty +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + 
.build()} + write ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt index 0e78132b91..c5027bb198 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt @@ -32,6 +32,13 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + read ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt index 200d21fc64..16cfaa112a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt @@ -29,6 +29,13 @@ connected read zilla:data.empty +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write ${mqtt:session() .subscription("sensor/one", 1) .subscription("sensor/two", 1) @@ -40,6 +47,13 @@ read ${mqtt:session() .subscription("sensor/two", 1) .build()} +write zilla:data.ext ${mqtt:dataEx() + 
.typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write ${mqtt:session() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt index 6243dda9e3..95ffcd3aaa 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt @@ -31,6 +31,13 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + read ${mqtt:session() .subscription("sensor/one", 1) .subscription("sensor/two", 1) @@ -43,6 +50,13 @@ write ${mqtt:session() .build()} write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + read ${mqtt:session() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt index a6cfebd9c9..8e7264694f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt @@ -29,6 +29,13 @@ connected read 
zilla:data.empty +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write ${mqtt:session() .subscription("sensor/one", 1) .build()} @@ -37,6 +44,13 @@ read ${mqtt:session() .subscription("sensor/one", 1) .build()} +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write ${mqtt:session() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt index 8b8acf889c..f4a805c480 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt @@ -33,6 +33,13 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + read ${mqtt:session() .subscription("sensor/one", 1) .build()} @@ -42,6 +49,13 @@ write ${mqtt:session() .build()} write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + read ${mqtt:session() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt index e7af77b53c..e49c39952f 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt @@ -30,6 +30,13 @@ connected read zilla:data.empty +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt index 87f98db3e8..1ac1e8f037 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt @@ -33,6 +33,13 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + read ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.disconnect.with.will.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt similarity index 66% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.disconnect.with.will.message/client.rpt rename to 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt index 327eacf0dd..51937e1f9e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.disconnect.with.will.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt @@ -21,23 +21,27 @@ connect "zilla://streams/app0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() + .flags("WILL", "CLEAN_START") .clientId("one") - .will() - .topic("wills/one") - .format("TEXT") - .payload("client one session expired") - .build() .build() .build()} connected -read zilla:data.empty +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +write ${mqtt:will() + .topic("wills/one") + .format("TEXT") + .payload("client one session expired") + .build()} +write flush -write zilla:end.ext ${mqtt:endEx() - .typeId(zilla:id("mqtt")) - .reason("DISCONNECT_WITH_WILL") - .build()} +read zilla:data.empty -write close -read closed +write abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.disconnect.with.will.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt similarity index 67% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.disconnect.with.will.message/server.rpt rename to incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt index 7fa2aa8b91..fb26a8d19a 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.disconnect.with.will.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt @@ -23,24 +23,28 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() + .flags("WILL", "CLEAN_START") .clientId("one") - .will() - .topic("wills/one") - .format("TEXT") - .payload("client one session expired") - .build() .build() .build()} connected + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +read ${mqtt:will() + .topic("wills/one") + .format("TEXT") + .payload("client one session expired") + .build()} + write zilla:data.empty write flush -read zilla:end.ext ${mqtt:endEx() - .typeId(zilla:id("mqtt")) - .reason("DISCONNECT_WITH_WILL") - .build()} - -read closed -write close +read aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.no.ping.within.keep.alive/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.no.ping.within.keep.alive/client.rpt deleted file mode 100644 index 1a916fea30..0000000000 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.no.ping.within.keep.alive/client.rpt +++ /dev/null @@ -1,43 +0,0 @@ -# -# Copyright 2021-2023 Aklivity Inc. -# -# Aklivity licenses this file to you under the Apache License, -# version 2.0 (the "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -connect "zilla://streams/app0" - option zilla:window 8192 - option zilla:transmission "duplex" - -write zilla:begin.ext ${mqtt:beginEx() - .typeId(zilla:id("mqtt")) - .session() - .clientId("one") - .will() - .topic("wills/one") - .format("TEXT") - .payload("client one session expired") - .build() - .build() - .build()} - -connected - -read zilla:data.empty - -write zilla:end.ext ${mqtt:endEx() - .typeId(zilla:id("mqtt")) - .reason("KEEP_ALIVE_EXPIRY") - .build()} - -write close -read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.no.ping.within.keep.alive/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.no.ping.within.keep.alive/server.rpt deleted file mode 100644 index 7d09b0668c..0000000000 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.no.ping.within.keep.alive/server.rpt +++ /dev/null @@ -1,46 +0,0 @@ -# -# Copyright 2021-2023 Aklivity Inc. -# -# Aklivity licenses this file to you under the Apache License, -# version 2.0 (the "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -accept "zilla://streams/app0" - option zilla:window 8192 - option zilla:transmission "duplex" - -accepted - -read zilla:begin.ext ${mqtt:matchBeginEx() - .typeId(zilla:id("mqtt")) - .session() - .clientId("one") - .will() - .topic("wills/one") - .format("TEXT") - .payload("client one session expired") - .build() - .build() - .build()} - -connected - -write zilla:data.empty -write flush - -read zilla:end.ext ${mqtt:endEx() - .typeId(zilla:id("mqtt")) - .reason("KEEP_ALIVE_EXPIRY") - .build()} - -read closed -write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt index 108080f362..aed7e6d713 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt @@ -21,23 +21,28 @@ connect "zilla://streams/app0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() + .flags("WILL", "CLEAN_START") .clientId("one") - .will() - .topic("wills/one") - .format("TEXT") - .payload("client one session expired") - .build() .build() .build()} connected -read zilla:data.empty +write zilla:data.ext ${mqtt:dataEx() + 
.typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +write ${mqtt:will() + .topic("wills/one") + .format("TEXT") + .payload("client one session expired") + .build()} +write flush -write zilla:end.ext ${mqtt:endEx() - .typeId(zilla:id("mqtt")) - .reason("DISCONNECT") - .build()} +read zilla:data.empty write close read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt index 94489d98e3..c570dab6f8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt @@ -23,24 +23,29 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() + .flags("WILL", "CLEAN_START") .clientId("one") - .will() - .topic("wills/one") - .format("TEXT") - .payload("client one session expired") - .build() .build() .build()} connected + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +read ${mqtt:will() + .topic("wills/one") + .format("TEXT") + .payload("client one session expired") + .build()} + write zilla:data.empty write flush -read zilla:end.ext ${mqtt:endEx() - .typeId(zilla:id("mqtt")) - .reason("DISCONNECT") - .build()} - read closed write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt index ce41ce3f68..8129b052e5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt @@ -22,15 +22,24 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .clientId("one") - .will() - .topic("wills/one") - .flags("RETAIN") - .format("TEXT") - .payload("client one session expired") - .build() .build() .build()} connected +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +write ${mqtt:will() + .topic("wills/one") + .flags("RETAIN") + .format("TEXT") + .payload("client one session expired") + .build()} +write flush + read zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt index 7cbdb75f40..8bf27cb120 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt @@ -24,16 +24,25 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .clientId("one") - .will() - .topic("wills/one") - .flags("RETAIN") - .format("TEXT") - .payload("client one session expired") - .build() .build() .build()} connected + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() 
+ .kind("WILL") + .build() + .build()} + +read ${mqtt:will() + .topic("wills/one") + .flags("RETAIN") + .format("TEXT") + .payload("client one session expired") + .build()} + write zilla:data.empty write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt index e2ce9cf455..0a09934b3b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt index 6712eeeb12..becde28c74 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 
66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt index 17253032c3..01bae13c04 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt index d1b4bfceb1..9f3305413e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt index 939934e70b..21af97d26a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt index 199ed1b4e5..11fe55d81a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt index 58ca55aadf..d7944ac3e7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt @@ -18,23 +18,23 @@ connect "zilla://streams/net0" option zilla:window 8192 option zilla:transmission "duplex" option zilla:byteorder "network" - option zilla:byteorder "network" connected -write [0x10 0x18] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x05] # properties = maximum packet size - [0x27] 50 # maximum packet size = 50 - [0x00 0x06] "client" # client id +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties = maximum packet size + [0x27] 50 # maximum packet size = 50 + [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 50 # maximum packet size = 50 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt index fbc06eccd0..ad9b156305 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt @@ -18,24 +18,24 @@ accept "zilla://streams/net0" option zilla:window 8192 option zilla:transmission "duplex" option zilla:byteorder "network" - option zilla:byteorder "network" accepted connected -read [0x10 0x18] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x05] # properties = maximum packet size - [0x27] 50 # maximum packet size = 50 - [0x00 0x06] "client" # client id +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties = maximum packet size + [0x27] 50 # maximum packet size = 50 + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 50 # maximum packet size = 50 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt index a65921d8e9..dddff030bd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt @@ -20,19 +20,20 @@ connect "zilla://streams/net0" connected -write [0x10 0x18] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x05] # properties = maximum packet size - [0x27 0x00 0x32] # maximum packet size = 50 - [0x00 0x06] "client" # client id +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties = maximum packet size + [0x27 0x00 0x32] # maximum packet size = 50 + [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt index 2821fc44de..b0bde6dc23 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt @@ -21,19 +21,20 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x18] # CONNECT - [0x00 0x04] "MQTT" # protocol name - 
[0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x05] # properties = maximum packet size - [0x27 0x00 0x32] # maximum packet size = 50 - [0x00 0x06] "client" # client id +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties = maximum packet size + [0x27 0x00 0x32] # maximum packet size = 50 + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/client.rpt index 18fcb7c354..e296e3aa88 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/client.rpt @@ -29,8 +29,9 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x05] # CONNACK +read [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x24 0x00] # maximum qos = at most once diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/server.rpt index 4499d83b64..badb4b4bdf 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/server.rpt @@ -30,8 +30,9 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x05] # CONNACK +write [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x24 0x00] # maximum qos = at most once diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt index f5732d9ba7..b9b5d32130 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt @@ -30,7 +30,8 @@ write [0x10 0x21] # CONNECT [0x00 0x06] "client" # client id [0x00 0x0c] "Bearer TOKEN" # password -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt index c16e710bce..57871900b2 
100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt @@ -31,7 +31,8 @@ read [0x10 0x21] # CONNECT [0x00 0x06] "client" # client id [0x00 0x0c] "Bearer TOKEN" # password -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/client.rpt new file mode 100644 index 0000000000..6a8b2df528 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/client.rpt @@ -0,0 +1,42 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0xff 0x7f] # CONNECT, remaining length = 16383 + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x06] # flags = will flag, clean start + [0x00 0x0a] # keep alive = 10s + [0x00] # properties + [0x00 0x03] "one" # client id + [0x02] # will properties + [0x01 0x01] # format = utf-8 + [0x00 0x09] "wills/one" # will topic + [0xdf 0x3f] ${mqtt:randomBytes(16351)} # will payload + +read [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x95] # reason = packet too large + [0x00] # properties = none + +read closed +write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/server.rpt new file mode 100644 index 0000000000..5c7427a967 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/server.rpt @@ -0,0 +1,43 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0xff 0x7f] # CONNECT, remaining length = 16383 + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x06] # flags = will flag, clean start + [0x00 0x0a] # keep alive = 10s + [0x00] # properties + [0x00 0x03] "one" # client id + [0x02] # will properties + [0x01 0x01] # format = utf-8 + [0x00 0x09] "wills/one" # will topic + [0xdf 0x3f] [0..16351] # will payload + +write [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x95] # reason = packet too large + [0x00] # properties = none + +write close +read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt index 4716401c5a..b5bcdf864d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none - [0x00] # reason = success - [0x00] # properties = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x10 0x13] # CONNECT [0x00 0x04] "MQTT" # protocol name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt index be588902cc..b67de5c22f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason = success - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x10 0x13] # CONNECT [0x00 0x04] "MQTT" # protocol name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/client.rpt index 242add14df..3f273dbfaf 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/client.rpt @@ -29,8 +29,10 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x05] # CONNACK +read [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x25 0x00] # retain unavailable + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/server.rpt index cdbb92a648..915ca3ba71 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/server.rpt @@ -30,8 +30,9 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x05] # CONNACK +write [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x25 0x00] # retain unavailable diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt index d3eaed1606..e529445593 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt @@ -29,8 +29,9 @@ write [0x10 0x0d] # CONNECT [0x00] # properties = none [0x00 0x00] # client id -read [0x20 0x2a] # CONNACK +read [0x20 0x2f] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x27] # properties + [0x2c] # properties + [0x27] 66560 # maximum packet size = 66560 [0x12 0x00 0x24] [0..36] # assigned clientId diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt index f4e35b2b1e..14425c0795 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt @@ -32,8 +32,9 @@ read [0x10 0x0d] # CONNECT [0x00] # properties = none [0x00 0x00] # client id -write [0x20 0x2a] # CONNACK +write [0x20 0x2f] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x27] # properties + [0x2c] # properties + [0x27] 66560 # maximum packet size = 66560 [0x12 0x00 0x24] ${assignedClientId} # assigned clientId diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/client.rpt index 90b2c6b917..a60102b57c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x06] # CONNACK +read [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties = server keep alive + [0x08] # properties = server keep alive + [0x27] 66560 # maximum packet size = 66560 [0x13] 10s # server keep alive = 10s read await WAIT_2_SECONDS diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/server.rpt index 3af4304ce1..8f4267357c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x06] # CONNACK +write [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties = server keep alive + [0x08] # properties = server keep alive + [0x27] 66560 # maximum packet size = 66560 [0x13] 10s # server keep alive = 10s write notify WAIT_2_SECONDS diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt index ccf49381b4..728ae4003a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt @@ -28,7 +28,6 @@ write [0x10 0x13] # CONNECT [0x00 0x3c] # keep alive = 60s [0x00] # properties = none [0x00 0x06] "client" # client id - [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 [0x02] # properties @@ -36,10 +35,11 @@ write [0x10 0x13] # CONNECT [0x00 0x0a] "sensor/one" # topic filter [0x20] # options = at-most-once -read [0x20 0x03] # 
CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt index db105b99fc..1ed4dd5906 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt @@ -36,12 +36,13 @@ read [0x10 0x13] # CONNECT [0x00 0x0a] "sensor/one" # topic filter [0x20] # options = at-most-once -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -write [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 - [0x00] # properties = none - [0x00] # reason code +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/client.rpt index 11aab482ff..ab94edaee7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/client.rpt @@ -29,7 +29,8 @@ write [0x05] # protocol version [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/server.rpt index 08b8a21115..7eeb5401f2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/server.rpt @@ -22,15 +22,16 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/client.rpt index 60cde0868b..05646c53a2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/client.rpt @@ -29,7 +29,8 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/server.rpt index e7deccee13..7eeb5401f2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/server.rpt @@ -22,15 +22,16 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason 
code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt index 6ae62af4fa..36e31e274d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt @@ -28,12 +28,13 @@ write [0x10 0x21] # CONNECT [0x00 0x3c] # keep alive = 60s [0x00] # properties = none [0x00 0x06] "client" # client id - [0x00 0x0c] "Bearer TOKEN" # password + [0x00 0x0c] "Bearer TOKEN" # username -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x30 0x39] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt index 97bd5176c3..4e6ff6a617 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt @@ -31,7 +31,8 @@ read [0x10 0x21] # CONNECT 
[0x00 0x06] "client" # client id [0x00 0x0c] "Bearer TOKEN" # username -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/client.rpt index 4a82d8b390..e1fd8c7e1b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0xe0 0x02] # DISCONNECT [0x8d] # reason = keep alive timeout diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/server.rpt index bfbcbf1ea5..3990ba344c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/server.rpt @@ -30,10 +30,11 
@@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0xe0 0x02] # DISCONNECT [0x8d] # reason = keep alive timeout diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt index fc5499f3d1..bfb44bde10 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt index ab8e3998a8..56422a43bd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/client.rpt index 467424a17c..19450a379d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0xe1 0x02] # malformed DISCONNECT [0x00] # normal disconnect diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/server.rpt index 61ac0df492..4d2e89d6ca 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0xe1 0x02] # malformed DISCONNECT [0x00] # normal disconnect diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt index 0e4dccb94f..25b1f63b22 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0xe0 0x02] # DISCONNECT [0x00] # normal disconnect diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt index 8e28b450c8..1d7188a18c 100644 ---
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0xe0 0x02] # DISCONNECT [0x00] # normal disconnect diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt index ee394def99..1da22ab03f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt @@ -21,35 +21,36 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x01] # keep alive = 1s - [0x00] # properties = none - [0x00 0x06] "client" # client id - -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x01] # keep alive = 1s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read await WAIT_1_SECOND -write [0xc0 0x00] # PINGREQ +write [0xc0 0x00] # 
PINGREQ -read [0xd0 0x00] # PINGRESP +read [0xd0 0x00] # PINGRESP read await WAIT_1_SECOND2 -write [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/one" # topic filter - [0x20] # options = at-most-once - -read [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 - [0x00] # properties = none - [0x00] # reason code +write [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +read [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt index 273ea6dbbf..68e12ef631 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt @@ -22,35 +22,36 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x01] # keep alive = 1s - [0x00] # properties = none - [0x00 0x06] "client" # client id - -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x01] # keep alive = 1s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # 
maximum packet size = 66560 write notify WAIT_1_SECOND -read [0xc0 0x00] # PINGREQ +read [0xc0 0x00] # PINGREQ -write [0xd0 0x00] # PINGRESP +write [0xd0 0x00] # PINGRESP write notify WAIT_1_SECOND2 -read [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/one" # topic filter - [0x20] # options = at-most-once - -write [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 - [0x00] # properties = none - [0x00] # reason code +read [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt index 969c1907f5..b2fdfa4427 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x11] # CONNECT [0x00] # properties = none [0x00 0x04] "abcd" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0xc0 0x00] # PINGREQ diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt index dbe9b48291..8c6e16f18e 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x11] # CONNECT [0x00] # properties = none [0x00 0x04] "abcd" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0xc0 0x00] # PINGREQ diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt index b035043ef2..18ad3ac68d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x30 0x0d] # PUBLISH flags = at-most-once [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt index c6835ad4bd..a47c03c9d6 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x30 0x0d] # PUBLISH flags = at-most-once [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt index 8d2b0d7dc1..8e180c26df 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x31 0x0d] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt index a5a00b450f..b621d3f44d 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x31 0x0d] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/client.rpt index b39fec144d..6b0f47417c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/client.rpt @@ -30,10 +30,11 @@ write [0x10 0x16] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x06] "client" # client id -read [0x20 0x06] # CONNACK +read [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 1s # topic alias maximum = 1 write [0x30 0x17] # PUBLISH diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/server.rpt index 
bd6707ead1..4c3f098582 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/server.rpt @@ -31,10 +31,11 @@ read [0x10 0x16] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x06] "client" # client id -write [0x20 0x06] # CONNACK +write [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 1s # topic alias maximum = 1 read [0x30 0x17] # PUBLISH diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/client.rpt index 59ea2e689b..4d8a3ad977 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/client.rpt @@ -29,11 +29,12 @@ write [0x10 0x16] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x06] "client" # client id -read [0x20 0x06] # CONNACK +read [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties - [0x22 0x00 0x01] # topic alias maximum = 1 + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x22] 1s # topic alias maximum = 1 write [0x30 0x18] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/server.rpt index 382be31814..e7aaa2b47b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/server.rpt @@ -30,11 +30,12 @@ read [0x10 0x16] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x06] "client" # client id -write [0x20 0x06] # CONNACK +write [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties - [0x22 0x00 0x01] # topic alias maximum = 1 + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x22] 1s # topic alias maximum = 1 read [0x30 0x18] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/client.rpt index 7816cde38d..ab309028ed 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/client.rpt @@ -30,10 +30,11 @@ write [0x10 0x16] # CONNECT [0x22 0x00 0x02] # topic alias maximum = 2 [0x00 0x06] "client" # client id -read [0x20 0x06] # CONNACK +read [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 2s # topic alias maximum = 2 write [0x30 
0x18] # PUBLISH diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/server.rpt index e3acfcf586..d8e29935cf 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/server.rpt @@ -31,10 +31,11 @@ read [0x10 0x16] # CONNECT [0x22 0x00 0x02] # topic alias maximum = 2 [0x00 0x06] "client" # client id -write [0x20 0x06] # CONNACK +write [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 2s # topic alias maximum = 2 read [0x30 0x18] # PUBLISH diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/client.rpt index f407519a5b..2e9d8afc44 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/client.rpt @@ -30,10 +30,11 @@ write [0x10 0x16] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x06] "client" # client id -read [0x20 0x06] # CONNACK +read [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties 
+ [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 1s # topic alias maximum = 1 write [0x30 0x18] # PUBLISH @@ -67,10 +68,11 @@ write [0x10 0x17] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x07] "client2" # client id -read [0x20 0x06] # CONNACK +read [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 1s # topic alias maximum = 1 write [0x30 0x0e] # PUBLISH diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/server.rpt index 9253a6b7ef..f43b342306 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/server.rpt @@ -31,10 +31,11 @@ read [0x10 0x16] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x06] "client" # client id -write [0x20 0x06] # CONNACK +write [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 1s # topic alias maximum = 1 read [0x30 0x18] # PUBLISH @@ -62,10 +63,11 @@ read [0x10 0x17] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x07] "client2" # client id -write [0x20 0x06] # CONNACK +write [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 1s # topic alias maximum = 1 read [0x30 0x0e] # PUBLISH diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/client.rpt index 842cf10a1a..573328e173 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/client.rpt @@ -30,10 +30,11 @@ write [0x10 0x16] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x06] "client" # client id -read [0x20 0x06] # CONNACK +read [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 1s # topic alias maximum = 1 write [0x30 0x18] # PUBLISH diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/server.rpt index 535fe0eb68..b9154917d0 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/server.rpt @@ -31,10 +31,11 @@ read [0x10 0x16] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x06] "client" # client id -write [0x20 0x06] # CONNACK +write [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum 
packet size = 66560 [0x22] 1s # topic alias maximum = 1 read [0x30 0x18] # PUBLISH diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/client.rpt index 5555adf084..8a39558db3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/client.rpt @@ -30,10 +30,11 @@ write [0x10 0x16] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x06] "client" # client id -read [0x20 0x06] # CONNACK +read [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 1s # topic alias maximum = 1 write [0x30 0x18] # PUBLISH diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/server.rpt index 63ac197384..5d627dd331 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/server.rpt @@ -31,10 +31,11 @@ read [0x10 0x16] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x06] "client" # client id -write [0x20 0x06] # CONNACK +write [0x20 0x0b] # CONNACK [0x00] # flags = 
none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 1s # topic alias maximum = 1 read [0x30 0x18] # PUBLISH diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/client.rpt index 6e2fca673e..9c790a6c1f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x30 0x15] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/server.rpt index 2380868209..d876b2b6c1 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write 
[0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x30 0x15] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt index e179751732..e8a61b496e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x30 0x15] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt index a0b2569596..58cbb0a288 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client
id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x30 0x15] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt index ae7414919b..ff642d5d12 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x30 0x15] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt index a0b2569596..58cbb0a288 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20
0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x30 0x15] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt index 4030fe9abf..50d990ab95 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt @@ -29,10 @@ -29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x30 0x39] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt index 463fc62f5b..49d54fcd5b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] #
properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x30 0x39] # PUBLISH [0x00 0x0a] "sensor/one" # topic name @@ -44,14 +45,14 @@ read [0x30 0x39] # PUBLISH [0x08 0x00 0x0a] "sensor/one" # response topic [0x09 0x00 0x04] "info" # correlation data "message" # payload - [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/two" # topic filter - [0x20] # options = at-most-once + [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/two" # topic filter + [0x20] # options = at-most-once -write [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 - [0x00] # properties = none - [0x00] # reason code +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt index 44a0e55da2..9ab4ebe435 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x30 0x39] # PUBLISH [0x00 0x0a] 
"sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt index 211a29685b..9e2f2cbd0c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x30 0x39] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/client.rpt index ce2f3c1d7b..fa4565b101 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x31 0x28] # PUBLISH flags = 
at-most-once, retain [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/server.rpt index 48bd9f4b0e..6a78f222dc 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x31 0x28] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/client.rpt index bf56176077..ffa3fd696a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - 
[0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x30 0x36] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/server.rpt index ad423bf5d9..2412c0df9f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x30 0x36] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/client.rpt new file mode 100644 index 0000000000..04c7b54dbc --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/client.rpt @@ -0,0 +1,54 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 9216 # maximum packet size = 9216 + +write [0x30 0xff 0x7f] # PUBLISH, remaining length = 16383 + [0x00 0x0a] "sensor/one" # topic name + [0x25] # properties + [0x02] 0x0f # expiry = 15 seconds + [0x03 0x00 0x07] "message" # content type + [0x01 0x01] # format = utf-8 + [0x08 0x00 0x0a] "sensor/one" # response topic + [0x09 0x00 0x04] "info" # correlation data + ${mqtt:randomBytes(16326)} # payload + +read [0xe0 0x02] # disconnect header + [0x95] # reason = packet too large + [0x00] # properties = none + +read closed +write close + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/server.rpt new file mode 100644 index 0000000000..863be0b74c --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/server.rpt @@ -0,0 +1,55 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 9216 # maximum packet size = 9216 + +read [0x30 0xff 0x7f] # PUBLISH, remaining length = 16383 + [0x00 0x0a] "sensor/one" # topic name + [0x25] # properties + [0x02] 0x0f # expiry = 15 seconds + [0x03 0x00 0x07] "message" # content type + [0x01 0x01] # format = utf-8 + [0x08 0x00 0x0a] "sensor/one" # response topic + [0x09 0x00 0x04] "info" # correlation data + [0..16326] # payload + +write [0xe0 0x02] # disconnect header + [0x95] # reason = packet too large + [0x00] # properties = none + +write close +read closed + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/client.rpt index aa6fe7845a..fe2d55b49f 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x30 0x3b] # PUBLISH, qos = at most once [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/server.rpt index 270f88990c..345786e4d7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x30 0x3b] # PUBLISH, qos = at most once [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt index e211184ae2..5101f466af 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x05] # CONNACK +read [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x24 0x00] # maximum qos = at most once write [0x32 0x39] # PUBLISH, qos = at least once diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt index 4ece5acfa3..379e720cfe 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x05] # CONNACK +write [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x24 0x00] # maximum qos = at most once read [0x32 0x39] # PUBLISH, qos = at least once diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/client.rpt index 42bbd0b808..5e94229103 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x32 0x39] # PUBLISH, qos = at least once [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/server.rpt index 5acb55f014..d18a86c6ce 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read 
[0x32 0x39] # PUBLISH, qos = at least once [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt index a673d141f9..cee75c68e9 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x05] # CONNACK +read [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x24 0x00] # maximum qos = at most once write [0x34 0x39] # PUBLISH, qos = exactly once diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt index 40970e6b00..490588aa66 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x05] # CONNACK +write [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 
66560 # maximum packet size = 66560 [0x24 0x00] # maximum qos = at most once read [0x34 0x39] # PUBLISH, qos = exactly once diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/client.rpt index c2a98921f4..865f19c5fc 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x34 0x39] # PUBLISH, qos = exactly once [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/server.rpt index 50dc4d2a37..12b201a50f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = 
none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x34 0x39] # PUBLISH, qos = exactly once [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/client.rpt index 01a18efe7f..a62e263d13 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x05] # CONNACK +read [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x25 0x00] # retain unavailable write [0x31 0x26] # PUBLISH flags = at-most-once, retain diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/server.rpt index 155185b854..00308505a8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write 
[0x20 0x05] # CONNACK +write [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x25 0x00] # retain unavailable read [0x31 0x26] # PUBLISH flags = at-most-once, retain diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/client.rpt index ce747d5f28..861eb55fdf 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x30 0x17] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/server.rpt index 30c71e3d95..5dd6de1f31 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/server.rpt @@ -30,10 +30,11 
@@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x30 0x17] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/client.rpt index f0465d8ee5..7aa1203da8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/client.rpt @@ -30,10 +30,11 @@ write [0x10 0x16] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x06] "client" # client id -read [0x20 0x06] # CONNACK +read [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 1s # topic alias maximum = 1 write [0x30 0x1a] # PUBLISH diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/server.rpt index be89b64955..c0932a2468 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/server.rpt @@ -31,10 +31,11 @@ read [0x10 0x16] # CONNECT [0x22 0x00 0x01] # topic alias maximum = 1 [0x00 0x06] "client" # client id -write [0x20 0x06] # CONNACK +write [0x20 0x0b] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x03] # properties + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 [0x22] 1s # topic alias maximum = 1 read [0x30 0x1a] # PUBLISH diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt index 1cd14952d2..1c2057ce46 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x31 0x14] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt index 31fdbc9e16..10ee4d4df2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x31 0x14] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/client.rpt index 2af757f188..d40221b8e3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x30 0x15] # PUBLISH [0x00 0x0b] "sensor/none" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/server.rpt index 265527cf55..69db05a2ac 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x30 0x15] # PUBLISH [0x00 0x0b] "sensor/none" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt index 35429cd5d6..e15d7931c4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt @@ -21,26 +21,27 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id +write [0x10 0x31] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -write [0x31 
0x4a] # PUBLISH - [0x00 0x0a] "/sensors/1" # topic name - [0x14] # properties - [0x26] # user property id - [0x00 0x04] "row1" # user property key - [0x00 0x01] "1" # user property value - [0x26] # user property id - [0x00 0x04] "row2" # user property key - [0x00 0x01] "2" # user property value +write [0x31 0x4a] # PUBLISH + [0x00 0x0a] "/sensors/1" # topic name + [0x14] # properties + [0x26] # user property id + [0x00 0x04] "row1" # user property key + [0x00 0x01] "1" # user property value + [0x26] # user property id + [0x00 0x04] "row2" # user property key + [0x00 0x01] "2" # user property value "{\"id\":\"1\",\"unit\":\"CELSIUS\",\"value\":\"189\"}" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt index 84e1702313..4470270545 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt @@ -22,26 +22,27 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id +read [0x10 0x31] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none 
+write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -read [0x31 0x4a] # PUBLISH - [0x00 0x0a] "/sensors/1" # topic name - [0x14] # properties - [0x26] # user property id - [0x00 0x04] "row1" # user property key - [0x00 0x01] "1" # user property value - [0x26] # user property id - [0x00 0x04] "row2" # user property key - [0x00 0x01] "2" # user property value - "{\"id\":\"1\",\"unit\":\"CELSIUS\",\"value\":\"189\"}" # payload +read [0x31 0x4a] # PUBLISH + [0x00 0x0a] "/sensors/1" # topic name + [0x14] # properties + [0x26] # user property id + [0x00 0x04] "row1" # user property key + [0x00 0x01] "1" # user property value + [0x26] # user property id + [0x00 0x04] "row2" # user property key + [0x00 0x01] "2" # user property value + "{\"id\":\"1\",\"unit\":\"CELSIUS\",\"value\":\"189\"}" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt index ba0327ee3f..7ee13a5f8b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt @@ -21,26 +21,27 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id +write [0x10 0x31] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 
0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -write [0x31 0x4a] # PUBLISH - [0x00 0x0a] "/sensors/1" # topic name - [0x14] # properties - [0x26] # user property id - [0x00 0x04] "row1" # user property key - [0x00 0x01] "1" # user property value - [0x26] # user property id - [0x00 0x04] "row1" # user property key - [0x00 0x01] "2" # user property value +write [0x31 0x4a] # PUBLISH + [0x00 0x0a] "/sensors/1" # topic name + [0x14] # properties + [0x26] # user property id + [0x00 0x04] "row1" # user property key + [0x00 0x01] "1" # user property value + [0x26] # user property id + [0x00 0x04] "row1" # user property key + [0x00 0x01] "2" # user property value "{\"id\":\"1\",\"unit\":\"CELSIUS\",\"value\":\"189\"}" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt index 1b5899fe62..0186b1e4aa 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt @@ -22,26 +22,27 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x24] 
"755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id +read [0x10 0x31] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -read [0x31 0x4a] # PUBLISH - [0x00 0x0a] "/sensors/1" # topic name - [0x14] # properties - [0x26] # user property id - [0x00 0x04] "row1" # user property key - [0x00 0x01] "1" # user property value - [0x26] # user property id - [0x00 0x04] "row1" # user property key - [0x00 0x01] "2" # user property value - "{\"id\":\"1\",\"unit\":\"CELSIUS\",\"value\":\"189\"}" # payload +read [0x31 0x4a] # PUBLISH + [0x00 0x0a] "/sensors/1" # topic name + [0x14] # properties + [0x26] # user property id + [0x00 0x04] "row1" # user property key + [0x00 0x01] "1" # user property value + [0x26] # user property id + [0x00 0x04] "row1" # user property key + [0x00 0x01] "2" # user property value + "{\"id\":\"1\",\"unit\":\"CELSIUS\",\"value\":\"189\"}" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt index 972f9f3f53..6e3ea77da4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt @@ -21,23 +21,24 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # 
CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id +write [0x10 0x31] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -write [0x31 0x3f] # PUBLISH - [0x00 0x0a] "/sensors/1" # topic name - [0x09] # properties - [0x26] # user property id - [0x00 0x03] "row" # user property key - [0x00 0x01] "1" # user property value +write [0x31 0x3f] # PUBLISH + [0x00 0x0a] "/sensors/1" # topic name + [0x09] # properties + [0x26] # user property id + [0x00 0x03] "row" # user property key + [0x00 0x01] "1" # user property value "{\"id\":\"1\",\"unit\":\"CELSIUS\",\"value\":\"189\"}" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt index bc00e3d81e..e4f952dd8b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt @@ -22,23 +22,24 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start 
- [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id +read [0x10 0x31] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -read [0x31 0x3f] # PUBLISH - [0x00 0x0a] "/sensors/1" # topic name - [0x09] # properties - [0x26] # user property id - [0x00 0x03] "row" # user property key - [0x00 0x01] "1" # user property value - "{\"id\":\"1\",\"unit\":\"CELSIUS\",\"value\":\"189\"}" # payload +read [0x31 0x3f] # PUBLISH + [0x00 0x0a] "/sensors/1" # topic name + [0x09] # properties + [0x26] # user property id + [0x00 0x03] "row" # user property key + [0x00 0x01] "1" # user property value + "{\"id\":\"1\",\"unit\":\"CELSIUS\",\"value\":\"189\"}" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt index 40167198c0..5d745bb68a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # 
reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 @@ -67,15 +68,16 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x01] # flags = session present - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x01] # flags = session present + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -read [0x30 0x18] # PUBLISH - [0x00 0x0a] "sensor/one" # topic name - [0x04] # properties - [0x0b 0x01] # subscription id = 1 - [0x01 0x01] # format = utf-8 - "message" # payload +read [0x30 0x18] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x04] # properties + [0x0b 0x01] # subscription id = 1 + [0x01 0x01] # format = utf-8 + "message" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt index e0e6e45a37..7aa9624f23 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt @@ -22,18 +22,19 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol 
name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 @@ -42,10 +43,10 @@ read [0x82 0x12] # SUBSCRIBE [0x00 0x0a] "sensor/one" # topic filter [0x20] # options = at-most-once -write [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 - [0x00] # properties = none - [0x00] # reason code +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code read aborted write abort @@ -62,14 +63,15 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x01] # flags = session present [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -write [0x30 0x18] # PUBLISH - [0x00 0x0a] "sensor/one" # topic name - [0x04] # properties - [0x0b 0x01] # subscription id = 1 - [0x01 0x01] # format = utf-8 - "message" # payload +write [0x30 0x18] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x04] # properties + [0x0b 0x01] # subscription id = 1 + [0x01 0x01] # format = utf-8 + "message" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt index 52ee3f7e37..35b8b9723b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 @@ -71,15 +72,15 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x01] # flags = session present - [0x00] # reason code - [0x00] # properties = none - +read [0x20 0x08] # CONNACK + [0x01] # flags = session present + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -read [0x30 0x18] # PUBLISH - [0x00 0x0a] "sensor/one" # topic name - [0x04] # properties - [0x0b 0x01] # subscription id = 1 - [0x01 0x01] # format = utf-8 - "message" # payload +read [0x30 0x18] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x04] # properties + [0x0b 0x01] # subscription id = 1 + [0x01 0x01] # format = utf-8 + "message" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt index 31bd2515d1..e2325eeaf2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties [0x00 0x06] "client" # client id -write [0x20 
0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 @@ -65,14 +66,15 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x01] # flags = session present [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -write [0x30 0x18] # PUBLISH - [0x00 0x0a] "sensor/one" # topic name - [0x04] # properties - [0x0b 0x01] # subscription id = 1 - [0x01 0x01] # format = utf-8 - "message" # payload \ No newline at end of file +write [0x30 0x18] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x04] # properties + [0x0b 0x01] # subscription id = 1 + [0x01 0x01] # format = utf-8 + "message" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt new file mode 100644 index 0000000000..29b4a77410 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt @@ -0,0 +1,40 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x3a] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x26] # flags = will retain, will flag, clean start + [0x00 0x0a] # keep alive = 10s + [0x00] # properties + [0x00 0x03] "one" # client id + [0x02] # will properties + [0x01 0x01] # format = utf-8 + [0x00 0x09] "wills/one" # will topic +write [0x00 0x1a] "client one session expired" # will payload + +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt new file mode 100644 index 0000000000..d1f8667469 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt @@ -0,0 +1,41 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x3a] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x26] # flags = will retain, will flag, clean start + [0x00 0x0a] # keep alive = 10s + [0x00] # properties + [0x00 0x03] "one" # client id + [0x02] # will properties + [0x01 0x01] # format = utf-8 + [0x00 0x09] "wills/one" # will topic + [0x00 0x1a] "client one session expired" # will payload + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt index 48cba5d50c..9f051d9142 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt @@ -18,21 +18,21 @@ connect "zilla://streams/net0" option zilla:window 8192 option zilla:transmission "duplex" option zilla:byteorder "network" - option zilla:byteorder "network" connected -write [0x10 0x10] # CONNECT - [0x00 0x04] "MQTT" # 
protocol name - [0x05] # protocol version - [0x00] # flags = none - [0x00 0x3c] # keep alive = 60s - [0x05] # properties - [0x11] 1 # session expiry interval - [0x00 0x03] "one" # client id +write [0x10 0x10] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x00] # flags = none + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x11] 1 # session expiry interval + [0x00 0x03] "one" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt index 4fddefb67b..ad0d678c0c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt @@ -18,21 +18,21 @@ accept "zilla://streams/net0" option zilla:window 8192 option zilla:transmission "duplex" option zilla:byteorder "network" - option zilla:byteorder "network" accepted connected -read [0x10 0x10] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x00] # flags = none - [0x00 0x3c] # keep alive = 60s - [0x05] # properties - [0x11] 1 # session expiry interval - [0x00 0x03] "one" # client id +read [0x10 0x10] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x00] # flags = none + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x11] 1 # session expiry interval + [0x00 0x03] "one" # 
client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt index 1028ffd030..9442e69a80 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 @@ -60,7 +61,7 @@ connect await FIRST_CONNECTED "zilla://streams/net0" option zilla:window 8192 option zilla:transmission "duplex" - option zilla:byteorder "network" + option zilla:byteorder "network" connected @@ -72,7 +73,8 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt index 7a36e1223c..152272502b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt @@ -22,18 +22,19 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties - [0x00 0x06] "client" # client id +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 @@ -47,9 +48,9 @@ write [0x90 0x04] # SUBACK [0x00] # properties = none [0x00] # reason code -write [0xe0 0x02] # DISCONNECT - [0x8e] # session taken over - [0x00] # properties = none +write [0xe0 0x02] # DISCONNECT + [0x8e] # session taken over + [0x00] # properties = none write close read closed @@ -58,15 +59,16 @@ read closed accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +read [0x10 0x13] # 
CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt index 286b7882be..c5ee483918 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0xe0 0x13] # DISCONNECT [0x9d] # reason code = Use another server diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt index 73a845c997..58120e20f9 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0xe0 0x13] # DISCONNECT [0x9d] # reason code = Use another server diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt index 6c4ad76ad8..3124f2838a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt index ec1a3928af..f34e33da00 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt @@ -22,39 +22,40 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id - -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none - -read [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/one" # topic filter - [0x20] # options = at-most-once - -write [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 - [0x00] # properties = none - [0x00] # reason code - -read [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 - [0x02] # properties - [0x0b 0x02] # subscription id = 2 - [0x00 0x0a] "sensor/two" # topic filter - [0x20] # options = at-most-once - -write [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 - [0x00] # properties = none - [0x00] # reason codes +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 + +read [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + 
+read [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x02] # subscription id = 2 + [0x00 0x0a] "sensor/two" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason codes diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt index c6cab05d5b..429035ba02 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x30 0x16] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt index 47b29b72b7..04129949be 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt @@ -22,18 +22,19 @@ accept 
"zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x30 0x16] # PUBLISH [0x00 0x0a] "sensor/one" # topic name diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt index 1568918c03..f862e9b39b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt index f346fdfe04..7c5aa38256 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt index 30590722b8..7da1547868 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt @@ -21,53 +21,54 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none 
+ [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -write [0x82 0x1f] # SUBSCRIBE - [0x00 0x01] # packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/one" # topic filter - [0x20] # options = at-most-once +write [0x82 0x1f] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once - [0x00 0x0a] "sensor/two" # topic filter - [0x20] # options = at-most-once + [0x00 0x0a] "sensor/two" # topic filter + [0x20] # options = at-most-once -write [0xa2 0x0f] # UNSUBSCRIBE - [0x00 0x02] # packet id = 2 - [0x00] # properties = none - [0x00 0x0a] "sensor/one" # topic filter +write [0xa2 0x0f] # UNSUBSCRIBE + [0x00 0x02] # packet id = 2 + [0x00] # properties = none + [0x00 0x0a] "sensor/one" # topic filter -write [0xa2 0x0f] # UNSUBSCRIBE - [0x00 0x03] # packet id = 3 - [0x00] # properties = none - [0x00 0x0a] "sensor/two" # topic filter +write [0xa2 0x0f] # UNSUBSCRIBE + [0x00 0x03] # packet id = 3 + [0x00] # properties = none + [0x00 0x0a] "sensor/two" # topic filter write notify UNSUBSCRIBE_ALL_FILTERS -read [0x90 0x05] # SUBACK - [0x00 0x01] # packet id = 1 - [0x00] # properties = none - [0x00 0x00] # reason code +read [0x90 0x05] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00 0x00] # reason code -read [0xb0 0x04] # UNSUBACK - [0x00 0x02] # packet id = 2 - [0x00] # properties = none - [0x00] # unsubscribe = success +read [0xb0 0x04] # UNSUBACK + [0x00 0x02] # packet id = 2 + [0x00] # properties = none + [0x00] # unsubscribe = success -read [0xb0 0x04] # UNSUBACK - [0x00 0x03] # packet id = 3 - [0x00] # properties = none - [0x00] # unsubscribe = success +read 
[0xb0 0x04] # UNSUBACK + [0x00 0x03] # packet id = 3 + [0x00] # properties = none + [0x00] # unsubscribe = success diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt index 682fdc2853..908b65016f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt @@ -22,38 +22,39 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -read [0x82 0x1f] # SUBSCRIBE - [0x00 0x01] # packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/one" # topic filter - [0x20] # options = at-most-once +read [0x82 0x1f] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once - [0x00 0x0a] "sensor/two" # topic filter - [0x20] 
# options = at-most-once + [0x00 0x0a] "sensor/two" # topic filter + [0x20] # options = at-most-once -read [0xa2 0x0f] # UNSUBSCRIBE - [0x00 0x02] # packet id = 2 - [0x00] # properties = none - [0x00 0x0a] "sensor/one" # topic filter +read [0xa2 0x0f] # UNSUBSCRIBE + [0x00 0x02] # packet id = 2 + [0x00] # properties = none + [0x00 0x0a] "sensor/one" # topic filter -read [0xa2 0x0f] # UNSUBSCRIBE - [0x00 0x03] # packet id = 3 - [0x00] # properties = none - [0x00 0x0a] "sensor/two" # topic filter +read [0xa2 0x0f] # UNSUBSCRIBE + [0x00 0x03] # packet id = 3 + [0x00] # properties = none + [0x00 0x0a] "sensor/two" # topic filter write [0x90 0x05] # SUBACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt index 9aa1320c06..186b1e219e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt index 6275448c9e..cbc4de3ba7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt index 7b82dfc026..01fca82896 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt @@ -33,10 +33,11 @@ write [0x10 0x3a] # CONNECT [0x00 0x09] "wills/one" # will topic [0x00 0x1a] "client one session expired" # will payload -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0xe0 0x02] # DISCONNECT [0x04] # disconnect with will message diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt index c4e6b53855..5996116b17 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt @@ -34,10 +34,11 @@ read [0x10 0x3a] # CONNECT [0x00 0x09] "wills/one" # will topic [0x00 0x1a] "client one session expired" # will payload -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0xe0 0x02] # DISCONNECT [0x04] # disconnect with will message diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/client.rpt index 002f06c76a..cb4483c26d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/client.rpt @@ -33,10 +33,11 @@ write [0x10 0x3a] # CONNECT [0x00 0x09] "wills/one" # will topic [0x00 0x1a] "client one session expired" # will payload -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags 
= none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0xe0 0x02] # DISCONNECT [0x8d] # reason = keep alive timeout diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/server.rpt index 25c88efa05..eeea697dfb 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/server.rpt @@ -34,10 +34,11 @@ read [0x10 0x3a] # CONNECT [0x00 0x09] "wills/one" # will topic [0x00 0x1a] "client one session expired" # will payload -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0xe0 0x02] # DISCONNECT [0x8d] # reason = keep alive timeout diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt index 5b9b318d90..e1600b0b51 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt @@ -33,10 +33,11 @@ write [0x10 0x3a] # CONNECT 
[0x00 0x09] "wills/one" # will topic [0x00 0x1a] "client one session expired" # will payload -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0xe0 0x02] # DISCONNECT [0x00] # normal disconnect diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt index 883cc65b8e..6a1b53ddf3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt @@ -34,10 +34,11 @@ read [0x10 0x3a] # CONNECT [0x00 0x09] "wills/one" # will topic [0x00 0x1a] "client one session expired" # will payload -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0xe0 0x02] # DISCONNECT [0x00] # normal disconnect diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt index 4059848eac..a3cb27a295 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt @@ -33,8 +33,8 @@ write [0x10 0x3a] # CONNECT [0x00 0x09] "wills/one" # will topic [0x00 0x1a] "client one session expired" # will payload -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none - + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt index 9114402158..d1f8667469 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt @@ -34,7 +34,8 @@ read [0x10 0x3a] # CONNECT [0x00 0x09] "wills/one" # will topic [0x00 0x1a] "client one session expired" # will payload -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt index 6e205f28f4..e9bc4c9e79 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt index 1bec59b7a5..61944e33f0 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/client.rpt index f7fda12388..a2a9df28f2 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x83 0x12] # malformed SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/server.rpt index d2539d4a04..f05e044a79 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x83 0x12] # malformed SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/client.rpt index f4689b5087..b89bfcd670 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/server.rpt index d19ea3313a..4938651215 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt index e206de2a14..4ed82006f8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x10] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt index 693ca30365..6f569a549b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # 
packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt index cf10a9efac..f0e68093c7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt index 89fdc8124f..d7e28e9599 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] 
"client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt index 9de59055cb..503cc35fe3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt index 4eb5e8814a..8265375b49 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt index 8113efb2c4..e1491aafbc 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt index 04a5236644..118e92c629 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt index 0b2b4fd884..299bbd396b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt index adce45abe2..71a46760c9 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt index 388c9dba39..4b1143221b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt index 25b8f9d763..2f5bb0a797 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt index 20ffd331c2..ba6e06343f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x14] # CONNECT [0x00] # properties = none [0x00 0x07] "client2" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x31 0x14] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name @@ -57,10 +58,11 @@ write [0x10 0x14] # CONNECT [0x00] # properties = none [0x00 0x07] "client1" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum 
packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt index dc1c3a6e66..436b990840 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x14] # CONNECT [0x00] # properties = none [0x00 0x07] "client2" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x31 0x14] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name @@ -53,10 +54,11 @@ read [0x10 0x14] # CONNECT [0x00] # properties = none [0x00 0x07] "client1" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt index 6e3a8ee494..1fbfe7f35c 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x14] # CONNECT [0x00] # properties = none [0x00 0x07] "client1" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x31 0x14] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name @@ -58,10 +59,11 @@ write [0x10 0x14] # CONNECT [0x00] # properties = none [0x00 0x07] "client2" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt index 627d0b1f62..d67bc10f41 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt @@ -30,12 +30,13 @@ read [0x10 0x14] # CONNECT [0x00] # properties = none [0x00 0x07] "client1" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # 
properties = none + [0x27] 66560 # maximum packet size = 66560 -read [0x31 0x14] # PUBLISH flags = at-most-once, retain +read [0x31 0x14] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name [0x00] # properties "message" # payload @@ -51,10 +52,11 @@ read [0x10 0x14] # CONNECT [0x00] # properties = none [0x00 0x07] "client2" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt index cb8c25a052..8a69f38f4a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt index 196f5130f1..e083bdc4d5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt index df3f3f98d4..1c5fd3fec3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x10] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt index 80e3f9db30..1c18976493 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x10] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt index 18cade375b..f293f66014 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt index 693ca30365..c7294e3326 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 @@ -58,4 +59,4 @@ write [0x30 0x18] # PUBLISH [0x04] # properties [0x0b 0x01] # subscription id = 1 [0x01 0x01] # format = utf-8 - "message" # payload \ No newline at end of file + "message" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/client.rpt index b39b697ee0..f935b83e27 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 
0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/server.rpt index fc394dde50..f915d88261 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/client.rpt index 683ce043d8..e8d88fb786 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 
0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x10] # SUBSCRIBE [0x02] # properties diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/server.rpt index 157566c112..4fe6b91fc7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x10] # SUBSCRIBE [0x02] # properties diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/client.rpt index 9c36998f8a..02d4452a51 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = 
none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x03] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/server.rpt index 95e8e7aa4f..efa46acd52 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x03] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/client.rpt index 41e3ba9b00..b981a2ef53 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none 
[0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/server.rpt index cbb98d694a..cae75dc440 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/client.rpt index 089f74870e..e74f580404 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/client.rpt @@ -29,10 +29,11 @@ write [0x10 
0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x05] # CONNACK +read [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x2a 0x00] # shared subscription unavailable write [0x82 0x23] # SUBSCRIBE diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/server.rpt index 6208375089..955a0daa5a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x05] # CONNACK +write [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x2a 0x00] # shared subscription unavailable read [0x82 0x23] # SUBSCRIBE diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/client.rpt index fcf5bf68a9..b4ab99c3ab 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x05] # CONNACK +read [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x29 0x00] # subscription identifiers unavailable write [0x82 0x12] # SUBSCRIBE diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/server.rpt index 0a49b964f9..68c5d5f498 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x05] # CONNACK +write [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x29 0x00] # subscription identifiers unavailable read [0x82 0x12] # SUBSCRIBE diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/client.rpt index 43385085a0..85b8c1d71d 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/server.rpt index 097955c349..7d38220bd3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/server.rpt @@ -22,18 +22,19 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write 
[0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt index feedd243e0..4c01193032 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x05] # CONNACK +read [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x28 0x00] # wildcard subscription unavailable write [0x82 0x14] # SUBSCRIBE diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt index 74cfb47054..3a5baf1ab2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x05] # CONNACK +write [0x20 0x0a] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x02] # properties + [0x07] # properties + [0x27] 66560 # maximum packet size = 66560 [0x28 0x00] # wildcard subscription unavailable read [0x82 0x14] # SUBSCRIBE diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt index fa8fce65d0..564d33f3a5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt index 36109c91be..c17d971cc0 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt 
+++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt index a04837c7ed..92d5d93d68 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x10] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt index ab94ad4d69..15e744e5f5 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x10] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt index bae0c2cdc0..5c5f4b3eab 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x14] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt index 1c4a186cfd..96650fc06a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt @@ -22,18 +22,19 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x14] # SUBSCRIBE [0x00 0x01] # packet id = 1 @@ -42,7 +43,7 @@ read [0x82 0x14] # SUBSCRIBE [0x00 0x0c] "sensor/+/1/#" # topic filter [0x20] # options = at-most-once -write [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 - [0x00] # properties = none - [0x00] # reason codes +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason codes diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt index 6d9f1dcfe5..434ce18b3c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt index d5ef889cf7..7c914c5b51 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt index b4f6fda4ab..a357131f7e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x10] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt index 42bd2af797..8aceb59618 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # 
maximum packet size = 66560 read [0x82 0x10] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt index cbd253e529..9ecd5c519f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt index c1d9da82e3..972062cbac 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt @@ -22,18 +22,19 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol 
version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt index 0353af589c..e1d584fb47 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x1f] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt index 9589f5edd5..cfc2382136 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x1f] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt index 5ff09fbca3..ea62596109 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x1d] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt index 381f20cc2a..4de6727ff5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x1d] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt index 7fd54dbcca..ecc300141d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # 
properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x1b] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt index fd29563aaa..435e7ef4eb 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x1b] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt index a1a93748aa..c45a415a66 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK 
[0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt index ec1a3928af..45b23519ab 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt index d5a44c06fa..51b7af1649 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # 
properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x10] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt index 9cf15ef8bb..24855d9f90 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x10] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt index 181b199d87..532b39fab8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt index 7d197e098a..aeadc62da8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt index 1312d627ea..412623486b 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK +read [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x1f] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt index 152943186e..13430ba6e8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt @@ -22,30 +22,31 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +write [0x20 0x08] # 
CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -read [0x82 0x1f] # SUBSCRIBE - [0x00 0x01] # packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/+/#" # topic filter - [0x20] # options = at-most-once +read [0x82 0x1f] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/+/#" # topic filter + [0x20] # options = at-most-once - [0x00 0x0a] "sensor/+/1" # topic filter - [0x20] # options = at-most-once + [0x00 0x0a] "sensor/+/1" # topic filter + [0x20] # options = at-most-once -write [0x90 0x05] # SUBACK - [0x00 0x01] # packet id = 1 - [0x00] # properties = none - [0x00 0x00] # reason codes +write [0x90 0x05] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00 0x00] # reason codes diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt index e4c43817bd..604012c7b4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt index e1fd92a605..33dc6f6484 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt index f78f7c24ac..98237eb68a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 
66560 write [0x82 0x1f] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt index 8c640277d7..fa8c2a909b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x1f] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt index beed4963a2..21707596c8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt @@ -17,6 +17,7 @@ connect "zilla://streams/net0" option zilla:window 8192 option zilla:transmission "duplex" + option zilla:byteorder "network" connected @@ -28,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none 
[0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt index a097586616..91fe86981e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt @@ -17,6 +17,7 @@ accept "zilla://streams/net0" option zilla:window 8192 option zilla:transmission "duplex" + option zilla:byteorder "network" accepted connected @@ -29,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt index 6d78cacf2d..1a15fe66bd 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt @@ -28,10 +28,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt index 381c026006..b99ca469e1 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt @@ -17,6 +17,7 @@ accept "zilla://streams/net0" option zilla:window 8192 option zilla:transmission "duplex" + option zilla:byteorder "network" accepted connected @@ -29,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt index 1f2e757fe2..e3fc8f7e1b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x1f] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt index 1627cbe50d..5eeb52dd97 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x1f] # 
SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt index d6832b017f..9788f07ecd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt index 6059ab3187..25a951ba8b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = 
none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt index a1bd515cb4..af9631abd1 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt index 2d3c972dac..a381e9a905 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id 
-write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt index ab7a2a81c3..1a0c4e388c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt index 3918ccccbe..6c370e90cd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt @@ -30,10 +30,11 @@ read [0x10 0x13] # CONNECT [0x00] # 
properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt index ff26c0387b..9389db8c48 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt @@ -29,10 +29,11 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x00] # properties = none +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 write [0x82 0x1f] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt index e204f07d33..5bb6ef2c12 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt @@ -30,10 +30,11 @@ read 
[0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -write [0x20 0x03] # CONNACK +write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x00] # properties = none + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 read [0x82 0x1f] # SUBSCRIBE [0x00 0x01] # packet id = 1 diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java index 44f2173d67..e6dd35db93 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java @@ -29,12 +29,11 @@ import org.junit.Test; import org.kaazing.k3po.lang.el.BytesMatcher; -import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttEndReasonCode; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttMessageFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttPayloadFormat; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSessionStateFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttDataExFW; -import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttEndExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttFlushExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttResetExFW; @@ -61,20 +60,10 @@ public void shouldEncodeMqttSessionBeginExt() final byte[] array = MqttFunctions.beginEx() .typeId(0) .session() - .clientId("client") + .flags("WILL", "CLEAN_START") .expiry(30) - .serverReference("localhost:1883") - .will() - .topic("will.client") - .delay(20) - .expiryInterval(15) - .contentType("message") - 
.format("TEXT") - .responseTopic("will.client.response") - .correlation("request-id-1") - .userProperty("name", "value") - .payload("client failed") - .build() + .clientId("client") + .serverRef("localhost:1883") .build() .build(); @@ -83,22 +72,9 @@ public void shouldEncodeMqttSessionBeginExt() assertEquals(2, mqttBeginEx.kind()); assertEquals("client", mqttBeginEx.session().clientId().asString()); - assertEquals("localhost:1883", mqttBeginEx.session().serverReference().asString()); + assertEquals("localhost:1883", mqttBeginEx.session().serverRef().asString()); assertEquals(30, mqttBeginEx.session().expiry()); - assertEquals("will.client", mqttBeginEx.session().will().topic().asString()); - assertEquals(20, mqttBeginEx.session().will().delay()); - assertEquals(15, mqttBeginEx.session().will().expiryInterval()); - assertEquals("message", mqttBeginEx.session().will().contentType().asString()); - assertEquals("TEXT", mqttBeginEx.session().will().format().toString()); - assertEquals("will.client.response", mqttBeginEx.session().will().responseTopic().asString()); - assertEquals("request-id-1", mqttBeginEx.session().will().correlation() - .bytes().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))); - assertNotNull(mqttBeginEx.session().will().properties() - .matchFirst(h -> - "name".equals(h.key().asString()) && - "value".equals(h.value().asString()))); - assertEquals("client failed", mqttBeginEx.session().will().payload() - .bytes().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))); + assertEquals(6, mqttBeginEx.session().flags()); } @Test @@ -107,8 +83,8 @@ public void shouldEncodeMqttSessionBeginExtWithoutWillMessage() final byte[] array = MqttFunctions.beginEx() .typeId(0) .session() - .clientId("client") .expiry(30) + .clientId("client") .build() .build(); @@ -127,13 +103,6 @@ public void shouldEncodeMqttSessionBeginExtWithFlagsBytesWillPayload() .typeId(0) .session() .clientId("client") - .will() - .topic("will.client") - .qos("AT_LEAST_ONCE") - 
.flags("RETAIN") - .correlationBytes("request-id-1".getBytes(UTF_8)) - .payloadBytes(new byte[] {0, 1, 2, 3, 4, 5}) - .build() .build() .build(); @@ -142,14 +111,6 @@ public void shouldEncodeMqttSessionBeginExtWithFlagsBytesWillPayload() assertEquals(2, mqttBeginEx.kind()); assertEquals("client", mqttBeginEx.session().clientId().asString()); - assertEquals("will.client", mqttBeginEx.session().will().topic().asString()); - assertEquals(1, mqttBeginEx.session().will().flags()); - assertEquals(0b0001, mqttBeginEx.session().will().flags()); - assertEquals("BINARY", mqttBeginEx.session().will().format().toString()); - assertEquals("request-id-1", mqttBeginEx.session().will().correlation() - .bytes().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))); - assertArrayEquals(new byte[] {0, 1, 2, 3, 4, 5}, mqttBeginEx.session().will().payload() - .bytes().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)).getBytes()); } @Test @@ -321,22 +282,10 @@ public void shouldMatchSessionBeginExtension() throws Exception { BytesMatcher matcher = MqttFunctions.matchBeginEx() .session() - .clientId("client") + .flags("CLEAN_START") .expiry(10) - .serverReference("localhost:1883") - .will() - .topic("willTopic") - .delay(10) - .qos("AT_MOST_ONCE") - .flags("RETAIN") - .expiryInterval(20) - .contentType("message") - .format("TEXT") - .responseTopic("willResponseTopic") - .correlation("correlationData") - .userProperty("key1", "value1") - .payload("will message") - .build() + .clientId("client") + .serverRef("localhost:1883") .build() .build(); @@ -346,23 +295,10 @@ public void shouldMatchSessionBeginExtension() throws Exception .wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x00) .session(s -> s - .clientId("client") + .flags(2) .expiry(10) - .serverReference("localhost:1883") - .will(c -> - { - c.topic("willTopic"); - c.delay(10); - c.qos(0); - c.flags(1); - c.expiryInterval(20); - c.contentType("message"); - c.format(f -> f.set(MqttPayloadFormat.TEXT)); - 
c.responseTopic("willResponseTopic"); - c.correlation(corr -> corr.bytes(b -> b.set("correlationData".getBytes(UTF_8)))); - c.propertiesItem(p -> p.key("key1").value("value1")); - c.payload(p -> p.bytes(b -> b.set("will message".getBytes(UTF_8)))); - })) + .clientId("client") + .serverRef("localhost:1883")) .build(); assertNotNull(matcher.match(byteBuf)); @@ -373,8 +309,8 @@ public void shouldMatchSessionBeginExtensionWithEmptyFields() throws Exception { BytesMatcher matcher = MqttFunctions.matchBeginEx() .session() - .clientId("client") .expiry(10) + .clientId("client") .build() .build(); @@ -384,8 +320,8 @@ public void shouldMatchSessionBeginExtensionWithEmptyFields() throws Exception .wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x00) .session(s -> s - .clientId("client") - .expiry(10)) + .expiry(10) + .clientId("client")) .build(); assertNotNull(matcher.match(byteBuf)); @@ -396,20 +332,8 @@ public void shouldMatchSessionBeginExtensionWithBytes() throws Exception { BytesMatcher matcher = MqttFunctions.matchBeginEx() .session() - .clientId("client") .expiry(10) - .will() - .topic("willTopic") - .delay(10) - .qos("AT_MOST_ONCE") - .expiryInterval(20) - .contentType("message") - .format("TEXT") - .responseTopic("willResponseTopic") - .correlationBytes("correlationData".getBytes(UTF_8)) - .userProperty("key1", "value1") - .payloadBytes("will message".getBytes(UTF_8)) - .build() + .clientId("client") .build() .build(); @@ -419,21 +343,8 @@ public void shouldMatchSessionBeginExtensionWithBytes() throws Exception .wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x00) .session(s -> s - .clientId("client") .expiry(10) - .will(c -> - { - c.topic("willTopic"); - c.delay(10); - c.qos(0); - c.expiryInterval(20); - c.contentType("message"); - c.format(f -> f.set(MqttPayloadFormat.TEXT)); - c.responseTopic("willResponseTopic"); - c.correlation(corr -> corr.bytes(b -> b.set("correlationData".getBytes(UTF_8)))); - c.propertiesItem(p -> 
p.key("key1").value("value1")); - c.payload(p -> p.bytes(b -> b.set("will message".getBytes(UTF_8)))); - })) + .clientId("client")) .build(); assertNotNull(matcher.match(byteBuf)); @@ -1218,6 +1129,23 @@ public void shouldEncodeMqttPublishDataExWithNullUserPropertyValue() Objects.isNull(h.value().asString()))); } + @Test + public void shouldEncodeMqttSessionDataEx() + { + final byte[] array = MqttFunctions.dataEx() + .typeId(0) + .session() + .kind("WILL") + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(array); + MqttDataExFW mqttPublishDataEx = new MqttDataExFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals(0, mqttPublishDataEx.typeId()); + assertEquals("WILL", mqttPublishDataEx.session().kind().toString()); + } + @Test public void shouldEncodeMqttSubscribeFlushEx() { @@ -1240,32 +1168,18 @@ public void shouldEncodeMqttSubscribeFlushEx() 0b0001 == f.flags())); } - @Test - public void shouldEncodeMqttAbortExAsUnsubscribe() - { - final byte[] array = MqttFunctions.endEx() - .typeId(0) - .reason("KEEP_ALIVE_EXPIRY") - .build(); - - DirectBuffer buffer = new UnsafeBuffer(array); - MqttEndExFW mqttEndEx = new MqttEndExFW().wrap(buffer, 0, buffer.capacity()); - assertEquals(0, mqttEndEx.typeId()); - assertEquals(MqttEndReasonCode.KEEP_ALIVE_EXPIRY, mqttEndEx.reasonCode().get()); - } - @Test public void shouldEncodeMqttResetEx() { final byte[] array = MqttFunctions.resetEx() .typeId(0) - .serverReference("localhost:1883") + .serverRef("localhost:1883") .build(); DirectBuffer buffer = new UnsafeBuffer(array); MqttResetExFW mqttResetEx = new MqttResetExFW().wrap(buffer, 0, buffer.capacity()); assertEquals(0, mqttResetEx.typeId()); - assertEquals("localhost:1883", mqttResetEx.serverReference().asString()); + assertEquals("localhost:1883", mqttResetEx.serverRef().asString()); } @Test @@ -1292,4 +1206,64 @@ public void shouldEncodeMqttSessionState() 0 == f.qos() && 0b0000 == f.flags())); } + + @Test + public void shouldEncodeWillMessage() + { + final 
byte[] array = MqttFunctions.will() + .topic("will.client") + .delay(20) + .expiryInterval(15) + .contentType("message") + .format("TEXT") + .responseTopic("will.client.response") + .correlation("request-id-1") + .userProperty("name", "value") + .payload("client failed") + .build(); + + DirectBuffer buffer = new UnsafeBuffer(array); + MqttMessageFW willMessage = new MqttMessageFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals("will.client", willMessage.topic().asString()); + assertEquals(20, willMessage.delay()); + assertEquals(15, willMessage.expiryInterval()); + assertEquals("message", willMessage.contentType().asString()); + assertEquals("TEXT", willMessage.format().toString()); + assertEquals("will.client.response", willMessage.responseTopic().asString()); + assertEquals("request-id-1", willMessage.correlation() + .bytes().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))); + assertNotNull(willMessage.properties() + .matchFirst(h -> + "name".equals(h.key().asString()) && + "value".equals(h.value().asString()))); + assertEquals("client failed", willMessage.payload() + .bytes().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))); + } + + @Test + public void shouldEncodeWillMessageBytesPayload() + { + final byte[] array = MqttFunctions.will() + .topic("will.client") + .qos("AT_LEAST_ONCE") + .flags("RETAIN") + .responseTopic("response_topic") + .correlationBytes("request-id-1".getBytes(UTF_8)) + .payloadBytes(new byte[] {0, 1, 2, 3, 4, 5}) + .build(); + + DirectBuffer buffer = new UnsafeBuffer(array); + MqttMessageFW willMessage = new MqttMessageFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals("will.client", willMessage.topic().asString()); + assertEquals(1, willMessage.flags()); + assertEquals(0b0001, willMessage.flags()); + assertEquals("BINARY", willMessage.format().toString()); + assertEquals("response_topic", willMessage.responseTopic().asString()); + assertEquals("request-id-1", willMessage.correlation() + .bytes().get((b, o, m) 
-> b.getStringWithoutLengthUtf8(o, m - o))); + assertArrayEquals(new byte[] {0, 1, 2, 3, 4, 5}, willMessage.payload() + .bytes().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)).getBytes()); + } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java index 1a2e6ef51c..196930aaf6 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java @@ -57,18 +57,9 @@ public void shouldRemoveSessionAtCleanStart() throws Exception @Test @Specification({ - "${app}/session.will.message.disconnect.with.will.message/client", - "${app}/session.will.message.disconnect.with.will.message/server"}) - public void shouldSendReasonForEndAfterDisconnectWithWillMessage() throws Exception - { - k3po.finish(); - } - - @Test - @Specification({ - "${app}/session.will.message.no.ping.within.keep.alive/client", - "${app}/session.will.message.no.ping.within.keep.alive/server"}) - public void shouldSendReasonForEndAfterKeepAliveTimeout() throws Exception + "${app}/session.will.message.abort/client", + "${app}/session.will.message.abort/server"}) + public void shouldAbortSessionStreamWhenWillDelivery() throws Exception { k3po.finish(); } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java index 71c2b374a5..ff46963650 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java +++ 
b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java @@ -401,6 +401,15 @@ public void shouldNotReceivePublishPacketExceedingMaxPacketLimit() throws Except k3po.finish(); } + @Test + @Specification({ + "${net}/connect.reject.packet.too.large/client", + "${net}/connect.reject.packet.too.large/server"}) + public void shouldRejectPacketTooLarge() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${net}/connect.subscribe.unfragmented/client", diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java index 010f0bcde3..8911a941c9 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java @@ -300,4 +300,13 @@ public void shouldPublishOneMessageThenSubscribeUnfragmented() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${net}/publish.reject.packet.too.large/client", + "${net}/publish.reject.packet.too.large/server"}) + public void shouldRejectPacketTooLarge() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java index 00fbf922a6..5ec4d0cf53 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java @@ -36,6 +36,7 @@ public final class MqttReasonCodes public static final byte UNSPECIFIED_ERROR = (byte) 0x80; public static final byte 
MALFORMED_PACKET = (byte) 0x81; public static final byte PROTOCOL_ERROR = (byte) 0x82; + public static final byte PACKET_TOO_LARGE = (byte) 0x95; public static final byte WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED = (byte) 0xa2; public static final byte SHARED_SUBSCRIPTION_NOT_SUPPORTED = (byte) 0x9e; public static final byte SUBSCRIPTION_IDS_NOT_SUPPORTED = (byte) 0xa1; diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 4b4ead716e..5daa0866e1 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -23,6 +23,7 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.MALFORMED_PACKET; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.NORMAL_DISCONNECT; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.NO_SUBSCRIPTION_EXISTED; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.PACKET_TOO_LARGE; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.PAYLOAD_FORMAT_INVALID; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.PROTOCOL_ERROR; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.QOS_NOT_SUPPORTED; @@ -111,7 +112,6 @@ import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttBinaryFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; -import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttEndReasonCode; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttMessageFW; 
import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttPayloadFormat; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttQoS; @@ -147,10 +147,10 @@ import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.FlushFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttDataExFW; -import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttEndExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttFlushExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttPublishDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttResetExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttSessionDataKind; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.ResetFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.SignalFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.WindowFW; @@ -242,7 +242,6 @@ public final class MqttServerFactory implements MqttStreamFactory private final MqttBeginExFW.Builder mqttPublishBeginExRW = new MqttBeginExFW.Builder(); private final MqttBeginExFW.Builder mqttSubscribeBeginExRW = new MqttBeginExFW.Builder(); private final MqttBeginExFW.Builder mqttSessionBeginExRW = new MqttBeginExFW.Builder(); - private final MqttEndExFW.Builder mqttEndExRW = new MqttEndExFW.Builder(); private final MqttDataExFW.Builder mqttPublishDataExRW = new MqttDataExFW.Builder(); private final MqttDataExFW.Builder mqttSubscribeDataExRW = new MqttDataExFW.Builder(); private final MqttDataExFW.Builder mqttSessionDataExRW = new MqttDataExFW.Builder(); @@ -304,6 +303,7 @@ public final class MqttServerFactory implements MqttStreamFactory private final MqttServerDecoder decodeInitialType = this::decodeInitialType; private final MqttServerDecoder decodePacketType = this::decodePacketType; 
private final MqttServerDecoder decodeConnect = this::decodeConnect; + private final MqttServerDecoder decodeConnectPayload = this::decodeConnectPayload; private final MqttServerDecoder decodePublish = this::decodePublish; private final MqttServerDecoder decodeSubscribe = this::decodeSubscribe; private final MqttServerDecoder decodeUnsubscribe = this::decodeUnsubscribe; @@ -314,7 +314,8 @@ public final class MqttServerFactory implements MqttStreamFactory private final Map decodersByPacketType; private final boolean session; - private final String serverReference; + private final String serverRef; + private int maximumPacketSize; { final Map decodersByPacketType = new EnumMap<>(MqttPacketType.class); @@ -415,6 +416,7 @@ public MqttServerFactory( this.keepAliveMinimum = config.keepAliveMinimum(); this.keepAliveMaximum = config.keepAliveMaximum(); this.maximumQos = config.maximumQos(); + this.maximumPacketSize = writeBuffer.capacity(); this.retainedMessages = config.retainAvailable() ? (byte) 1 : 0; this.wildcardSubscriptions = config.wildcardSubscriptionAvailable() ? (byte) 1 : 0; this.subscriptionIdentifiers = config.subscriptionIdentifierAvailable() ? (byte) 1 : 0; @@ -429,7 +431,7 @@ public MqttServerFactory( final Optional clientId = Optional.ofNullable(config.clientId()).map(String16FW::new); this.supplyClientId = clientId.isPresent() ? 
clientId::get : () -> new String16FW(UUID.randomUUID().toString()); - this.serverReference = config.serverReference(); + this.serverRef = config.serverReference(); } @Override @@ -762,11 +764,8 @@ private int decodeInitialType( break decode; } - if (limit - packet.limit() >= length) - { - server.decodeablePacketBytes = packet.sizeof() + length; - server.decoder = decodePacketType; - } + server.decodeablePacketBytes = packet.sizeof() + length; + server.decoder = decodePacketType; } return offset; @@ -789,7 +788,12 @@ private int decodePacketType( final MqttPacketType packetType = MqttPacketType.valueOf(packet.typeAndFlags() >> 4); final MqttServerDecoder decoder = decodersByPacketType.getOrDefault(packetType, decodeUnknownType); - if (limit - packet.limit() >= length) + if (packet.sizeof() + length > maximumPacketSize) + { + server.onDecodeError(traceId, authorization, PACKET_TOO_LARGE); + server.decoder = decodeIgnoreAll; + } + else if (limit - packet.limit() >= length) { server.decodeablePacketBytes = packet.sizeof() + length; server.decoder = decoder; @@ -817,6 +821,7 @@ private int decodeConnect( int reasonCode = SUCCESS; final MqttConnectFW mqttConnect = mqttConnectRO.tryWrap(buffer, offset, limit); + int flags = 0; decode: { if (mqttConnect == null) @@ -825,7 +830,8 @@ private int decodeConnect( break decode; } - int flags = mqttConnect.flags(); + server.decodableRemainingBytes = mqttConnect.remainingLength(); + flags = mqttConnect.flags(); reasonCode = decodeConnectType(mqttConnect, flags); if (reasonCode != SUCCESS) @@ -846,6 +852,9 @@ private int decodeConnect( } progress = server.onDecodeConnect(traceId, authorization, buffer, progress, limit, mqttConnect); + + final int decodedLength = progress - offset - 2; + server.decodableRemainingBytes -= decodedLength; } if (reasonCode != SUCCESS) @@ -853,6 +862,31 @@ private int decodeConnect( server.onDecodeError(traceId, authorization, reasonCode); server.decoder = decodeIgnoreAll; } + else + { + server.decoder = 
decodeConnectPayload; + } + } + + return progress; + } + + private int decodeConnectPayload( + MqttServer server, + final long traceId, + final long authorization, + final long budgetId, + final DirectBuffer buffer, + final int offset, + final int limit) + { + int progress = offset; + + progress = server.onDecodeConnectPayload(traceId, authorization, buffer, progress, limit); + server.decodableRemainingBytes -= progress - offset; + if (server.decodableRemainingBytes == 0) + { + server.decoder = decodePacketType; } return progress; @@ -1219,19 +1253,19 @@ private final class MqttServer private long keepAliveTimeoutAt; private boolean serverDefinedKeepAlive = false; - private boolean cleanStart = false; private short keepAlive; private long keepAliveTimeout; + private int connectFlags; private boolean connected; private short topicAliasMaximum = 0; - private int maximumPacketSize = Integer.MAX_VALUE; private int sessionExpiryInterval = 0; private boolean assignedClientId = false; private int propertyMask = 0; private int state; private long sessionId; + private int decodableRemainingBytes; private MqttServer( Function credentials, @@ -1484,10 +1518,7 @@ private void onKeepAliveTimeoutSignal( { if (session) { - final MqttEndExFW.Builder builder = mqttEndExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) - .typeId(mqttTypeId) - .reasonCode(r -> r.set(MqttEndReasonCode.KEEP_ALIVE_EXPIRY)); - sessionStream.doSessionAppEnd(traceId, builder.build()); + sessionStream.doSessionAbort(traceId); } onDecodeError(traceId, authorization, KEEP_ALIVE_TIMEOUT); decoder = decodeIgnoreAll; @@ -1563,14 +1594,15 @@ private byte decodeConnectProperties( break; case KIND_RECEIVE_MAXIMUM: case KIND_MAXIMUM_PACKET_SIZE: - final int maximumPacketSize = (int) mqttProperty.maximumPacketSize(); - if (maximumPacketSize == 0 || isSetMaximumPacketSize(propertyMask)) + final int maxConnectPacketSize = (int) mqttProperty.maximumPacketSize(); + if (maxConnectPacketSize == 0 || 
isSetMaximumPacketSize(propertyMask)) { reasonCode = PROTOCOL_ERROR; break decode; } this.propertyMask |= CONNECT_TOPIC_ALIAS_MAXIMUM_MASK; - this.maximumPacketSize = maximumPacketSize; + //TODO: remove this once we will support large messages + maximumPacketSize = Math.min(maxConnectPacketSize, maximumPacketSize); break; case KIND_REQUEST_RESPONSE_INFORMATION: case KIND_REQUEST_PROBLEM_INFORMATION: @@ -1639,23 +1671,54 @@ else if (length > MAXIMUM_CLIENT_ID_LENGTH) break decode; } + final MqttBindingConfig binding = bindings.get(routedId); + + final MqttRouteConfig resolved = binding != null ? binding.resolve(authorization) : null; + + keepAlive = (short) Math.min(Math.max(connect.keepAlive(), keepAliveMinimum), keepAliveMaximum); + serverDefinedKeepAlive = keepAlive != connect.keepAlive(); + keepAliveTimeout = Math.round(TimeUnit.SECONDS.toMillis(keepAlive) * 1.5); + connectFlags = connect.flags(); + doSignalKeepAliveTimeout(); + + if (session) + { + resolveSession(traceId, authorization, resolved.id, connectFlags); + } + + doCancelConnectTimeout(); + } + + progress = connect.limit(); + if (reasonCode != SUCCESS) + { + doCancelConnectTimeout(); + doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, null); + doNetworkEnd(traceId, authorization); + decoder = decodeIgnoreAll; + } + + return progress; + } + + private int onDecodeConnectPayload( + long traceId, + long authorization, + DirectBuffer buffer, + int progress, + int limit) + { + byte reasonCode; + decode: + { final MqttConnectPayload payload = mqttConnectPayloadRO.reset(); - progress = connect.limit(); - progress = payload.decode(buffer, progress, limit, connect.flags()); + int connectPayloadLimit = payload.decode(buffer, progress, limit, connectFlags); reasonCode = payload.reasonCode; if (reasonCode != SUCCESS) { break decode; } - if (isCleanStart(connect.flags())) - { - this.cleanStart = true; - } - keepAlive = (short) Math.min(Math.max(connect.keepAlive(), keepAliveMinimum), 
keepAliveMaximum); - serverDefinedKeepAlive = keepAlive != connect.keepAlive(); - keepAliveTimeout = Math.round(TimeUnit.SECONDS.toMillis(keepAlive) * 1.5); - doSignalKeepAliveTimeout(); long sessionAuth = authorization; if (guard != null) @@ -1690,36 +1753,75 @@ else if (this.authField.equals(MqttConnectProperty.PASSWORD)) } this.sessionId = sessionAuth; - if (session) - { - resolveSession(traceId, sessionAuth, resolved.id, connect, payload); - } - else + + if (!session) { - doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, null); + doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, + false, null); connected = true; } - doCancelConnectTimeout(); + final int flags = connectFlags; + final boolean willFlagSet = isSetWillFlag(flags); + final int willFlags = decodeWillFlags(flags); + final int willQos = decodeWillQos(flags); - decoder = decodePacketType; - } + if (session && willFlagSet) + { + final MqttDataExFW.Builder sessionDataExBuilder = + mqttSessionDataExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) + .typeId(mqttTypeId) + .session(s -> s.kind(k -> k.set(MqttSessionDataKind.WILL))); - if (reasonCode == BAD_USER_NAME_OR_PASSWORD) - { - doCancelConnectTimeout(); - doNetworkEnd(traceId, authorization); - decoder = decodeIgnoreAll; - progress = connect.limit(); + final MqttMessageFW.Builder willMessageBuilder = + mqttMessageFW.wrap(willMessageBuffer, 0, willMessageBuffer.capacity()) + .topic(payload.willTopic) + .delay(payload.willDelay) + .qos(willQos) + .flags(willFlags) + .expiryInterval(payload.expiryInterval) + .contentType(payload.contentType) + .format(f -> f.set(payload.payloadFormat)) + .responseTopic(payload.responseTopic) + .correlation(c -> c.bytes(payload.correlationData)); + + final Array32FW userProperties = willUserPropertiesRW.build(); + userProperties.forEach( + c -> willMessageBuilder.propertiesItem(p -> p.key(c.key()).value(c.value()))); + willMessageBuilder.payload(p -> 
p.bytes(payload.willPayload.bytes())); + + final MqttMessageFW will = willMessageBuilder.build(); + final int willPayloadSize = willMessageBuilder.sizeof(); + + if (!sessionStream.hasSessionWindow(willPayloadSize)) + { + break decode; + } + sessionStream.doSessionData(traceId, willPayloadSize, sessionDataExBuilder.build(), will); + } + progress = connectPayloadLimit; } - else if (reasonCode != SUCCESS) + + if (reasonCode != SUCCESS) { doCancelConnectTimeout(); - doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, null); + + if (reasonCode != BAD_USER_NAME_OR_PASSWORD) + { + doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, null); + } + + if (session) + { + sessionStream.doSessionAppEnd(traceId, EMPTY_OCTETS); + } doNetworkEnd(traceId, authorization); + decoder = decodeIgnoreAll; - progress = connect.limit(); + progress = limit; } + + return progress; } @@ -1727,43 +1829,16 @@ private void resolveSession( long traceId, long authorization, long resolvedId, - MqttConnectFW connect, - MqttConnectPayload payload) + int flags) { - final int flags = connect.flags(); - - final boolean willFlagSet = isSetWillFlag(flags); - final MqttBeginExFW.Builder builder = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) .typeId(mqttTypeId) - .session(sessionBuilder -> - { - sessionBuilder.clientId(clientId); - sessionBuilder.expiry(sessionExpiryInterval); - sessionBuilder.serverReference(serverReference); - if (willFlagSet) - { - final int willFlags = decodeWillFlags(flags); - final int willQos = decodeWillQos(flags); - final MqttMessageFW.Builder willMessageBuilder = - mqttMessageFW.wrap(willMessageBuffer, 0, willMessageBuffer.capacity()) - .topic(payload.willTopic) - .delay(payload.willDelay) - .qos(willQos) - .flags(willFlags) - .expiryInterval(payload.expiryInterval) - .contentType(payload.contentType) - .format(f -> f.set(payload.payloadFormat)) - .responseTopic(payload.responseTopic) - .correlation(c 
-> c.bytes(payload.correlationData)); - - final Array32FW userProperties = willUserPropertiesRW.build(); - userProperties.forEach( - c -> willMessageBuilder.propertiesItem(p -> p.key(c.key()).value(c.value()))); - willMessageBuilder.payload(p -> p.bytes(payload.willPayload.bytes())); - sessionBuilder.will(willMessageBuilder.build()); - } - }); + .session(sessionBuilder -> sessionBuilder + .flags(flags) + .expiry(sessionExpiryInterval) + .clientId(clientId) + .serverRef(serverRef) + ); if (sessionStream == null) { @@ -1945,6 +2020,11 @@ private void onDecodeSubscribe( if (session) { + final MqttDataExFW.Builder sessionDataExBuilder = + mqttSessionDataExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) + .typeId(mqttTypeId) + .session(sessionBuilder -> sessionBuilder.kind(k -> k.set(MqttSessionDataKind.STATE))); + final MqttSessionStateFW.Builder state = mqttSessionStateFW.wrap(sessionStateBuffer, 0, sessionStateBuffer.capacity()); @@ -1968,7 +2048,7 @@ private void onDecodeSubscribe( final MqttSessionStateFW sessionState = state.build(); final int payloadSize = sessionState.sizeof(); - sessionStream.doSessionData(traceId, payloadSize, sessionState); + sessionStream.doSessionData(traceId, payloadSize, sessionDataExBuilder.build(), sessionState); } else { @@ -2078,6 +2158,11 @@ private void doSendSessionState( long traceId, List topicFilters) { + final MqttDataExFW.Builder sessionDataExBuilder = + mqttSessionDataExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) + .typeId(mqttTypeId) + .session(sessionBuilder -> sessionBuilder.kind(k -> k.set(MqttSessionDataKind.STATE))); + List currentState = sessionStream.subscriptions(); List newState = currentState.stream() .filter(subscription -> !topicFilters.contains(subscription.filter)) @@ -2098,7 +2183,7 @@ private void doSendSessionState( final MqttSessionStateFW sessionState = sessionStateBuilder.build(); final int payloadSize = sessionState.sizeof(); - sessionStream.doSessionData(traceId, payloadSize, 
sessionState); + sessionStream.doSessionData(traceId, payloadSize, sessionDataExBuilder.build(), sessionState); } private void sendUnsuback( @@ -2162,13 +2247,14 @@ private void onDecodeDisconnect( state = MqttState.closingInitial(state); if (session) { - final MqttEndExFW.Builder builder = mqttEndExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) - .typeId(mqttTypeId) - .reasonCode(r -> r.set( - disconnect.reasonCode() == DISCONNECT_WITH_WILL_MESSAGE ? - MqttEndReasonCode.DISCONNECT_WITH_WILL : - MqttEndReasonCode.DISCONNECT)); - sessionStream.doSessionAppEnd(traceId, builder.build()); + if (disconnect.reasonCode() == DISCONNECT_WITH_WILL_MESSAGE) + { + sessionStream.doSessionAbort(traceId); + } + else + { + sessionStream.doSessionAppEnd(traceId, EMPTY_OCTETS); + } } closeStreams(traceId, authorization); doNetworkEnd(traceId, authorization); @@ -2464,6 +2550,12 @@ private void doEncodeConnack( MqttPropertyFW mqttProperty; if (reasonCode == SUCCESS) { + //TODO: remove this once we support large messages + mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity()) + .maximumPacketSize(maximumPacketSize) + .build(); + propertiesSize = mqttProperty.limit(); + if (sessionExpiryInterval > sessionExpiryIntervalLimit) { mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity()) @@ -3020,6 +3112,7 @@ private void onSessionWindow( final long authorization = window.authorization(); final long budgetId = window.budgetId(); final int padding = window.padding(); + final boolean wasOpen = MqttState.initialOpened(state); if (!MqttState.initialOpened(state)) { @@ -3040,6 +3133,11 @@ private void onSessionWindow( assert initialAck <= initialSeq; + if (!wasOpen) + { + decodeNetwork(traceId); + } + if (budgetId != 0L && debitorIndex == NO_DEBITOR_INDEX) { debitor = supplyDebitor.apply(budgetId); @@ -3065,20 +3163,20 @@ private void onSessionReset( if (mqttResetEx != null) { - String16FW serverReference = 
mqttResetEx.serverReference(); - boolean serverReferenceExists = serverReference != null; + String16FW serverRef = mqttResetEx.serverRef(); + boolean serverRefExists = serverRef != null; - byte reasonCode = serverReferenceExists ? SERVER_MOVED : SESSION_TAKEN_OVER; + byte reasonCode = serverRefExists ? SERVER_MOVED : SESSION_TAKEN_OVER; if (!connected) { doCancelConnectTimeout(); doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, - false, serverReference); + false, serverRef); } else { - doEncodeDisconnect(traceId, authorization, reasonCode, serverReferenceExists ? serverReference : null); + doEncodeDisconnect(traceId, authorization, reasonCode, serverRefExists ? serverRef : null); } } setInitialClosed(); @@ -3150,9 +3248,9 @@ private void onSessionData( boolean sessionPresent = false; if (sessionState != null) { - if (cleanStart) + if (isCleanStart(connectFlags)) { - doSessionData(traceId, 0, emptyRO); + doSessionData(traceId, 0, emptyRO, emptyRO); } else { @@ -3274,16 +3372,23 @@ private void doSessionBegin( } } + private boolean hasSessionWindow( + int length) + { + return initialMax - (initialSeq - initialAck) >= length + initialPad; + } + private void doSessionData( long traceId, int reserved, - Flyweight sessionState) + Flyweight dataEx, + Flyweight payload) { assert MqttState.initialOpening(state); - final DirectBuffer buffer = sessionState.buffer(); - final int offset = sessionState.offset(); - final int limit = sessionState.limit(); + final DirectBuffer buffer = payload.buffer(); + final int offset = payload.offset(); + final int limit = payload.limit(); final int length = limit - offset; assert reserved >= length + initialPad; @@ -3292,7 +3397,7 @@ private void doSessionData( if (!MqttState.closed(state)) { doData(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, sessionId, budgetId, reserved, buffer, offset, length, EMPTY_OCTETS); + traceId, sessionId, budgetId, reserved, buffer, offset, length, 
dataEx); initialSeq += reserved; assert initialSeq <= initialAck + initialMax; diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java index dedc52e7bd..95fd384785 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java @@ -25,6 +25,7 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.WILDCARD_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; +import static io.aklivity.zilla.runtime.engine.test.EngineRule.ENGINE_BUFFER_SLOT_CAPACITY_NAME; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -624,4 +625,19 @@ public void shouldConnectAndSubscribeUnfragmented() throws Exception { k3po.finish(); } + + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/connect.reject.packet.too.large/client"}) + @Configure(name = SESSION_AVAILABLE_NAME, value = "false") + @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") + @Configure(name = ENGINE_BUFFER_SLOT_CAPACITY_NAME, value = "8192") + public void shouldRejectPacketTooLarge() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PublishIT.java 
b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PublishIT.java index 389414a5eb..078a49afc9 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PublishIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PublishIT.java @@ -25,6 +25,7 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.TOPIC_ALIAS_MAXIMUM_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.WILDCARD_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; +import static io.aklivity.zilla.runtime.engine.test.EngineRule.ENGINE_BUFFER_SLOT_CAPACITY_NAME; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -478,4 +479,19 @@ public void shouldPublishEmptyMessage() throws Exception { k3po.finish(); } + + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/publish.reject.packet.too.large/client"}) + @Configure(name = SESSION_AVAILABLE_NAME, value = "false") + @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") + @Configure(name = ENGINE_BUFFER_SLOT_CAPACITY_NAME, value = "8192") + public void shouldRejectPacketTooLarge() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java index 489b9c62ae..9937e3a2b1 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java +++ 
b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java @@ -172,6 +172,21 @@ public void shouldStoreWillMessageInSessionState() throws Exception k3po.finish(); } + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/session.connect.payload.fragmented/client", + "${app}/session.will.message.retain/server"}) + @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") + public void shouldStoreWillMessageInSessionStatePayloadFragmented() throws Exception + { + k3po.finish(); + } + + @Test @Configuration("server.yaml") @Specification({ @@ -190,7 +205,7 @@ public void shouldCloseSessionNormalDisconnect() throws Exception @Configuration("server.yaml") @Specification({ "${net}/session.will.message.disconnect.with.will.message/client", - "${app}/session.will.message.disconnect.with.will.message/server"}) + "${app}/session.will.message.abort/server"}) @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @@ -204,7 +219,7 @@ public void shouldCloseSessionDisconnectWithWill() throws Exception @Configuration("server.yaml") @Specification({ "${net}/session.will.message.no.ping.within.keep.alive/client", - "${app}/session.will.message.no.ping.within.keep.alive/server"}) + "${app}/session.will.message.abort/server"}) @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") diff --git a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java 
b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java index 66636d3340..52a15e91ec 100644 --- a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java +++ b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java @@ -45,8 +45,6 @@ import io.aklivity.zilla.runtime.command.log.internal.types.KafkaPartitionFW; import io.aklivity.zilla.runtime.command.log.internal.types.KafkaSkipFW; import io.aklivity.zilla.runtime.command.log.internal.types.KafkaValueMatchFW; -import io.aklivity.zilla.runtime.command.log.internal.types.MqttEndReasonCode; -import io.aklivity.zilla.runtime.command.log.internal.types.MqttMessageFW; import io.aklivity.zilla.runtime.command.log.internal.types.MqttPayloadFormat; import io.aklivity.zilla.runtime.command.log.internal.types.MqttTopicFilterFW; import io.aklivity.zilla.runtime.command.log.internal.types.MqttUserPropertyFW; @@ -99,7 +97,6 @@ import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaResetExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttDataExFW; -import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttEndExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttFlushExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttPublishBeginExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttPublishDataExFW; @@ -145,7 +142,6 @@ public final class LoggableStream implements AutoCloseable private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); private final MqttDataExFW mqttDataExRO = new MqttDataExFW(); private final MqttFlushExFW mqttFlushExRO = new MqttFlushExFW(); - private final MqttEndExFW mqttEndExRO = new MqttEndExFW(); private final AmqpBeginExFW amqpBeginExRO = new 
AmqpBeginExFW(); private final AmqpDataExFW amqpDataExRO = new AmqpDataExFW(); @@ -269,7 +265,6 @@ public final class LoggableStream implements AutoCloseable beginHandlers.put(labels.lookupLabelId("mqtt"), this::onMqttBeginEx); dataHandlers.put(labels.lookupLabelId("mqtt"), this::onMqttDataEx); flushHandlers.put(labels.lookupLabelId("mqtt"), this::onMqttFlushEx); - endHandlers.put(labels.lookupLabelId("mqtt"), this::onMqttEndEx); } if (hasExtensionType.test("amqp")) @@ -1310,34 +1305,10 @@ private void onMqttSessionBeginEx( { final String clientId = session.clientId().asString(); final int expiry = session.expiry(); - final MqttMessageFW will = session.will(); + final String serverRef = session.serverRef().asString(); out.printf(verboseFormat, index, offset, timestamp, - format("[session] %s %d", clientId, expiry)); - if (will != null) - { - final String willTopic = will.topic().asString(); - final int delay = will.delay(); - final int flags = will.flags(); - final int expiryInterval = will.expiryInterval(); - final String contentType = will.contentType().asString(); - final MqttPayloadFormat format = will.format().get(); - final String responseTopic = will.responseTopic().asString(); - final String correlation = asString(will.correlation().bytes()); - final Array32FW userProperties = will.properties(); - final String payload = asString(will.payload().bytes()); - out.printf(verboseFormat, index, offset, timestamp, format("will topic: %s", willTopic)); - out.printf(verboseFormat, index, offset, timestamp, format("will delay: %d", delay)); - out.printf(verboseFormat, index, offset, timestamp, format("will flags: %d", flags)); - out.printf(verboseFormat, index, offset, timestamp, format("will expiry: %d", expiryInterval)); - out.printf(verboseFormat, index, offset, timestamp, format("will content type: %s", contentType)); - out.printf(verboseFormat, index, offset, timestamp, format("will format: %s", format.name())); - out.printf(verboseFormat, index, offset, 
timestamp, format("will response topic: %s", responseTopic)); - out.printf(verboseFormat, index, offset, timestamp, format("will correlation: %s", correlation)); - out.printf(verboseFormat, index, offset, timestamp, format("will payload: %s", payload)); - userProperties.forEach(u -> out.printf(verboseFormat, index, offset, timestamp, - format("will user property: %s %s ", u.key(), u.value()))); - } + format("[session] %s %d %s", clientId, expiry, serverRef)); } private void onMqttDataEx( @@ -1420,18 +1391,6 @@ private void onMqttFlushEx( format("%s %d %d", f.pattern(), f.subscriptionId(), f.flags()))); } - private void onMqttEndEx( - final EndFW end) - { - final int offset = end.offset() - HEADER_LENGTH; - final long timestamp = end.timestamp(); - final OctetsFW extension = end.extension(); - - final MqttEndExFW mqttEndEx = mqttEndExRO.wrap(extension.buffer(), extension.offset(), extension.limit()); - final MqttEndReasonCode reasonCode = mqttEndEx.reasonCode().get(); - out.printf(verboseFormat, index, offset, timestamp, format("%s", reasonCode.name())); - } - private void onAmqpBeginEx( final BeginFW begin) { From 97ff5cd82d649bd9e78500e8fb1a6ce3aac39ff2 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Tue, 29 Aug 2023 12:40:15 -0700 Subject: [PATCH 065/115] Update ignores to simplify local testing --- cloud/docker-image/src/main/docker/.gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/docker-image/src/main/docker/.gitignore b/cloud/docker-image/src/main/docker/.gitignore index ab5fa6ba9e..9b5c9c3e77 100644 --- a/cloud/docker-image/src/main/docker/.gitignore +++ b/cloud/docker-image/src/main/docker/.gitignore @@ -1,3 +1,5 @@ zpmw zpm.json zpm-lock.json +zilla.yaml +tls/ From 4146fd73f89e82d9d67d10b41fdb438697d615ba Mon Sep 17 00:00:00 2001 From: Attila Kreiner Date: Tue, 29 Aug 2023 23:24:26 +0200 Subject: [PATCH 066/115] Generate zilla.yaml from an OpenAPI definition (#324) --- cloud/docker-image/pom.xml | 6 + 
.../main/docker/incubator/zpm.json.template | 1 + incubator/command-config/COPYRIGHT | 12 + incubator/command-config/LICENSE | 114 +++++ incubator/command-config/NOTICE | 13 + incubator/command-config/NOTICE.template | 13 + incubator/command-config/mvnw | 310 +++++++++++ incubator/command-config/mvnw.cmd | 182 +++++++ incubator/command-config/pom.xml | 168 ++++++ .../internal/ZillaConfigCommandSpi.java | 30 ++ .../internal/airline/ConfigGenerator.java | 20 + .../internal/airline/ZillaConfigCommand.java | 72 +++ ...llaConfigCommandPathConverterProvider.java | 63 +++ .../OpenApiHttpProxyConfigGenerator.java | 480 ++++++++++++++++++ .../internal/openapi/model/BearerAuth.java | 20 + .../internal/openapi/model/Components.java | 22 + .../internal/openapi/model/OpenApi.java | 26 + .../internal/openapi/model/Operation.java | 23 + .../internal/openapi/model/PathItem.java | 27 + .../openapi/model/SecurityScheme.java | 20 + .../config/internal/openapi/model/Server.java | 20 + .../internal/openapi/model2/PathItem2.java | 62 +++ .../internal/openapi/model2/Server2.java | 41 ++ .../src/main/moditect/module-info.java | 32 ++ .../OpenApiHttpProxyConfigGeneratorTest.java | 95 ++++ .../openapi/http/proxy/complete/openapi.yaml | 169 ++++++ .../openapi/http/proxy/complete/zilla.yaml | 126 +++++ .../openapi/http/proxy/jwt/openapi.yaml | 168 ++++++ .../openapi/http/proxy/jwt/zilla.yaml | 81 +++ .../openapi/http/proxy/plain/openapi.yaml | 157 ++++++ .../openapi/http/proxy/plain/zilla.yaml | 54 ++ .../openapi/http/proxy/tls/openapi.yaml | 157 ++++++ .../openapi/http/proxy/tls/zilla.yaml | 94 ++++ incubator/pom.xml | 6 + .../runtime/engine/config/ConfigWriter.java | 42 +- .../engine/src/main/moditect/module-info.java | 1 + .../engine/config/ConfigWriterTest.java | 53 ++ 37 files changed, 2975 insertions(+), 5 deletions(-) create mode 100644 incubator/command-config/COPYRIGHT create mode 100644 incubator/command-config/LICENSE create mode 100644 incubator/command-config/NOTICE create mode 
100644 incubator/command-config/NOTICE.template create mode 100755 incubator/command-config/mvnw create mode 100644 incubator/command-config/mvnw.cmd create mode 100644 incubator/command-config/pom.xml create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/ZillaConfigCommandSpi.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ConfigGenerator.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommandPathConverterProvider.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/OpenApiHttpProxyConfigGenerator.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/BearerAuth.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/Components.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/OpenApi.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/Operation.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/PathItem.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/SecurityScheme.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/Server.java create mode 100644 
incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/PathItem2.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/Server2.java create mode 100644 incubator/command-config/src/main/moditect/module-info.java create mode 100644 incubator/command-config/src/test/java/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/OpenApiHttpProxyConfigGeneratorTest.java create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/complete/openapi.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/complete/zilla.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/jwt/openapi.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/jwt/zilla.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/plain/openapi.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/plain/zilla.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/tls/openapi.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/tls/zilla.yaml diff --git a/cloud/docker-image/pom.xml b/cloud/docker-image/pom.xml index cf0618c24d..5db823424a 100644 --- a/cloud/docker-image/pom.xml +++ b/cloud/docker-image/pom.xml @@ -364,6 +364,12 @@ ${project.version} runtime + + ${project.groupId} + command-config + ${project.version} + runtime + diff 
--git a/cloud/docker-image/src/main/docker/incubator/zpm.json.template b/cloud/docker-image/src/main/docker/incubator/zpm.json.template index cfef01d5db..f50046cb33 100644 --- a/cloud/docker-image/src/main/docker/incubator/zpm.json.template +++ b/cloud/docker-image/src/main/docker/incubator/zpm.json.template @@ -33,6 +33,7 @@ "io.aklivity.zilla:binding-tls", "io.aklivity.zilla:binding-ws", "io.aklivity.zilla:command", + "io.aklivity.zilla:command-config", "io.aklivity.zilla:command-dump", "io.aklivity.zilla:command-metrics", "io.aklivity.zilla:command-start", diff --git a/incubator/command-config/COPYRIGHT b/incubator/command-config/COPYRIGHT new file mode 100644 index 0000000000..0cb10b6f62 --- /dev/null +++ b/incubator/command-config/COPYRIGHT @@ -0,0 +1,12 @@ +Copyright ${copyrightYears} Aklivity Inc + +Licensed under the Aklivity Community License (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + https://www.aklivity.io/aklivity-community-license/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. diff --git a/incubator/command-config/LICENSE b/incubator/command-config/LICENSE new file mode 100644 index 0000000000..f6abb6327b --- /dev/null +++ b/incubator/command-config/LICENSE @@ -0,0 +1,114 @@ + Aklivity Community License Agreement + Version 1.0 + +This Aklivity Community License Agreement Version 1.0 (the “Agreement”) sets +forth the terms on which Aklivity, Inc. (“Aklivity”) makes available certain +software made available by Aklivity under this Agreement (the “Software”). BY +INSTALLING, DOWNLOADING, ACCESSING, USING OR DISTRIBUTING ANY OF THE SOFTWARE, +YOU AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. 
IF YOU DO NOT AGREE TO +SUCH TERMS AND CONDITIONS, YOU MUST NOT USE THE SOFTWARE. IF YOU ARE RECEIVING +THE SOFTWARE ON BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU +HAVE THE ACTUAL AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS +AGREEMENT ON BEHALF OF SUCH ENTITY. “Licensee” means you, an individual, or +the entity on whose behalf you are receiving the Software. + + 1. LICENSE GRANT AND CONDITIONS. + + 1.1 License. Subject to the terms and conditions of this Agreement, + Aklivity hereby grants to Licensee a non-exclusive, royalty-free, + worldwide, non-transferable, non-sublicenseable license during the term + of this Agreement to: (a) use the Software; (b) prepare modifications and + derivative works of the Software; (c) distribute the Software (including + without limitation in source code or object code form); and (d) reproduce + copies of the Software (the “License”). Licensee is not granted the + right to, and Licensee shall not, exercise the License for an Excluded + Purpose. For purposes of this Agreement, “Excluded Purpose” means making + available any software-as-a-service, platform-as-a-service, + infrastructure-as-a-service or other similar online service that competes + with Aklivity products or services that provide the Software. + + 1.2 Conditions. In consideration of the License, Licensee’s distribution + of the Software is subject to the following conditions: + + (a) Licensee must cause any Software modified by Licensee to carry + prominent notices stating that Licensee modified the Software. + + (b) On each Software copy, Licensee shall reproduce and not remove or + alter all Aklivity or third party copyright or other proprietary + notices contained in the Software, and Licensee must provide the + notice below with each copy. + + “This software is made available by Aklivity, Inc., under the + terms of the Aklivity Community License Agreement, Version 1.0 + located at http://www.Aklivity.io/Aklivity-community-license. 
BY + INSTALLING, DOWNLOADING, ACCESSING, USING OR DISTRIBUTING ANY OF + THE SOFTWARE, YOU AGREE TO THE TERMS OF SUCH LICENSE AGREEMENT.” + + 1.3 Licensee Modifications. Licensee may add its own copyright notices + to modifications made by Licensee and may provide additional or different + license terms and conditions for use, reproduction, or distribution of + Licensee’s modifications. While redistributing the Software or + modifications thereof, Licensee may choose to offer, for a fee or free of + charge, support, warranty, indemnity, or other obligations. Licensee, and + not Aklivity, will be responsible for any such obligations. + + 1.4 No Sublicensing. The License does not include the right to + sublicense the Software, however, each recipient to which Licensee + provides the Software may exercise the Licenses so long as such recipient + agrees to the terms and conditions of this Agreement. + + 2. TERM AND TERMINATION. This Agreement will continue unless and until + earlier terminated as set forth herein. If Licensee breaches any of its + conditions or obligations under this Agreement, this Agreement will + terminate automatically and the License will terminate automatically and + permanently. + + 3. INTELLECTUAL PROPERTY. As between the parties, Aklivity will retain all + right, title, and interest in the Software, and all intellectual property + rights therein. Aklivity hereby reserves all rights not expressly granted + to Licensee in this Agreement. Aklivity hereby reserves all rights in its + trademarks and service marks, and no licenses therein are granted in this + Agreement. + + 4. DISCLAIMER. Aklivity HEREBY DISCLAIMS ANY AND ALL WARRANTIES AND + CONDITIONS, EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, AND SPECIFICALLY + DISCLAIMS ANY WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR + PURPOSE, WITH RESPECT TO THE SOFTWARE. + + 5. LIMITATION OF LIABILITY. 
Aklivity WILL NOT BE LIABLE FOR ANY DAMAGES OF + ANY KIND, INCLUDING BUT NOT LIMITED TO, LOST PROFITS OR ANY CONSEQUENTIAL, + SPECIAL, INCIDENTAL, INDIRECT, OR DIRECT DAMAGES, HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, ARISING OUT OF THIS AGREEMENT. THE FOREGOING SHALL + APPLY TO THE EXTENT PERMITTED BY APPLICABLE LAW. + + 6.GENERAL. + + 6.1 Governing Law. This Agreement will be governed by and interpreted in + accordance with the laws of the state of California, without reference to + its conflict of laws principles. If Licensee is located within the + United States, all disputes arising out of this Agreement are subject to + the exclusive jurisdiction of courts located in Santa Clara County, + California. USA. If Licensee is located outside of the United States, + any dispute, controversy or claim arising out of or relating to this + Agreement will be referred to and finally determined by arbitration in + accordance with the JAMS International Arbitration Rules. The tribunal + will consist of one arbitrator. The place of arbitration will be Palo + Alto, California. The language to be used in the arbitral proceedings + will be English. Judgment upon the award rendered by the arbitrator may + be entered in any court having jurisdiction thereof. + + 6.2 Assignment. Licensee is not authorized to assign its rights under + this Agreement to any third party. Aklivity may freely assign its rights + under this Agreement to any third party. + + 6.3 Other. This Agreement is the entire agreement between the parties + regarding the subject matter hereof. No amendment or modification of + this Agreement will be valid or binding upon the parties unless made in + writing and signed by the duly authorized representatives of both + parties. In the event that any provision, including without limitation + any condition, of this Agreement is held to be unenforceable, this + Agreement and all licenses and rights granted hereunder will immediately + terminate. 
Waiver by Aklivity of a breach of any provision of this + Agreement or the failure by Aklivity to exercise any right hereunder + will not be construed as a waiver of any subsequent breach of that right + or as a waiver of any other right. \ No newline at end of file diff --git a/incubator/command-config/NOTICE b/incubator/command-config/NOTICE new file mode 100644 index 0000000000..9024d8926d --- /dev/null +++ b/incubator/command-config/NOTICE @@ -0,0 +1,13 @@ +Licensed under the Aklivity Community License (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + https://www.aklivity.io/aklivity-community-license/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. + +This project includes: + diff --git a/incubator/command-config/NOTICE.template b/incubator/command-config/NOTICE.template new file mode 100644 index 0000000000..209ca12f74 --- /dev/null +++ b/incubator/command-config/NOTICE.template @@ -0,0 +1,13 @@ +Licensed under the Aklivity Community License (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + https://www.aklivity.io/aklivity-community-license/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+ +This project includes: +#GENERATED_NOTICES# diff --git a/incubator/command-config/mvnw b/incubator/command-config/mvnw new file mode 100755 index 0000000000..d2f0ea3808 --- /dev/null +++ b/incubator/command-config/mvnw @@ -0,0 +1,310 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven2 Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . 
"$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. +cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. + + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! 
`expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit 
checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + fi + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` + fi + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" + fi + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=`cygpath --path --windows "$javaClass"` + fi + if [ -e "$javaClass" ]; then + if [ ! 
-e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." + fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. 
+MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/incubator/command-config/mvnw.cmd b/incubator/command-config/mvnw.cmd new file mode 100644 index 0000000000..b26ab24f03 --- /dev/null +++ b/incubator/command-config/mvnw.cmd @@ -0,0 +1,182 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. 
+@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven2 Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. 
+goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + +FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. 
+if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/incubator/command-config/pom.xml b/incubator/command-config/pom.xml new file mode 100644 index 0000000000..abffc062e9 --- /dev/null +++ b/incubator/command-config/pom.xml @@ -0,0 +1,168 @@ + + + + 4.0.0 + + io.aklivity.zilla + incubator + develop-SNAPSHOT + ../pom.xml + + + command-config + zilla::incubator::command-config + + + + Aklivity Community License Agreement + https://www.aklivity.io/aklivity-community-license/ + repo + + + + + 11 + 11 + 0.60 + 4 + + + + + ${project.groupId} + engine.spec + ${project.version} + provided + + + ${project.groupId} + engine + ${project.version} + provided + + + ${project.groupId} + command + ${project.version} + provided + + + io.aklivity.zilla + binding-http + ${project.version} + provided + + + io.aklivity.zilla + binding-tcp + ${project.version} + provided + + + io.aklivity.zilla + binding-tls + ${project.version} + provided + + + io.aklivity.zilla + guard-jwt + ${project.version} + provided + + + io.aklivity.zilla + vault-filesystem + ${project.version} + provided + + + org.junit.jupiter + junit-jupiter-engine + test + + + + + + + org.jasig.maven + maven-notice-plugin + + + com.mycila + license-maven-plugin + + + 
src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/**/* + + + + + maven-checkstyle-plugin + + + org.apache.maven.plugins + maven-compiler-plugin + + + org.apache.maven.plugins + maven-surefire-plugin + + + org.moditect + moditect-maven-plugin + + + org.apache.maven.plugins + maven-failsafe-plugin + + + org.jacoco + jacoco-maven-plugin + + + io/aklivity/zilla/runtime/command/config/internal/types/**/*.class + io/aklivity/zilla/runtime/command/config/internal/openapi/model/*.class + + + + BUNDLE + + + INSTRUCTION + COVEREDRATIO + ${jacoco.coverage.ratio} + + + CLASS + MISSEDCOUNT + ${jacoco.missed.count} + + + + + + + + ${project.groupId} + flyweight-maven-plugin + ${project.version} + + core + io.aklivity.zilla.runtime.command.config.internal.types + + + + + generate + + + + + + + diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/ZillaConfigCommandSpi.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/ZillaConfigCommandSpi.java new file mode 100644 index 0000000000..6a526cf02b --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/ZillaConfigCommandSpi.java @@ -0,0 +1,30 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal; + +import com.github.rvesse.airline.builder.CliBuilder; + +import io.aklivity.zilla.runtime.command.ZillaCommandSpi; +import io.aklivity.zilla.runtime.command.config.internal.airline.ZillaConfigCommand; + +public class ZillaConfigCommandSpi implements ZillaCommandSpi +{ + @Override + public void mixin( + CliBuilder builder) + { + builder.withCommand(ZillaConfigCommand.class); + } +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ConfigGenerator.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ConfigGenerator.java new file mode 100644 index 0000000000..06462e5112 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ConfigGenerator.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.airline; + +public interface ConfigGenerator +{ + String generate(); +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java new file mode 100644 index 0000000000..c953e894ec --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java @@ -0,0 +1,72 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.airline; + +import static org.agrona.LangUtil.rethrowUnchecked; + +import java.io.FileInputStream; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Map; +import java.util.function.Function; + +import com.github.rvesse.airline.annotations.Command; +import com.github.rvesse.airline.annotations.Option; +import com.github.rvesse.airline.annotations.restrictions.AllowedValues; +import com.github.rvesse.airline.annotations.restrictions.Required; + +import io.aklivity.zilla.runtime.command.ZillaCommand; +import io.aklivity.zilla.runtime.command.config.internal.openapi.http.proxy.OpenApiHttpProxyConfigGenerator; + +@Command(name = "config", description = "Generate configuration file") +public final class ZillaConfigCommand extends ZillaCommand +{ + private static final Map> GENERATORS = Map.of( + "openapi.http.proxy", OpenApiHttpProxyConfigGenerator::new + ); + + @Option(name = {"-t", "--template"}, + description = "Template name") + @Required + @AllowedValues(allowedValues = {"openapi.http.proxy"}) + public String template; + + @Option(name = {"-i", "--input"}, + description = "Input filename", + typeConverterProvider = ZillaConfigCommandPathConverterProvider.class) + public Path input; + + @Option(name = {"-o", "--output"}, + description = "Output filename", + typeConverterProvider = ZillaConfigCommandPathConverterProvider.class) + public Path output = Paths.get("zilla.yaml"); + + @Override + public void run() + { + try (InputStream inputStream = new FileInputStream(input.toFile())) + { + ConfigGenerator generator = GENERATORS.get(template).apply(inputStream); + Files.writeString(output, generator.generate()); + } + catch (Exception ex) + { + ex.printStackTrace(); + rethrowUnchecked(ex); + } + } +} diff --git 
a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommandPathConverterProvider.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommandPathConverterProvider.java new file mode 100644 index 0000000000..6160cc405d --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommandPathConverterProvider.java @@ -0,0 +1,63 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.airline; + +import java.nio.file.Paths; + +import com.github.rvesse.airline.model.ArgumentsMetadata; +import com.github.rvesse.airline.model.OptionMetadata; +import com.github.rvesse.airline.parser.ParseState; +import com.github.rvesse.airline.types.TypeConverter; +import com.github.rvesse.airline.types.TypeConverterProvider; +import com.github.rvesse.airline.types.numerics.NumericTypeConverter; + +public final class ZillaConfigCommandPathConverterProvider implements TypeConverterProvider +{ + private final ZillaDumpCommandPathConverter converter = new ZillaDumpCommandPathConverter(); + + private final class ZillaDumpCommandPathConverter implements TypeConverter + { + @Override + public void setNumericConverter( + NumericTypeConverter converter) + { + } + + @Override + public Object convert( + String name, + Class type, + String value) + { + return Paths.get(value); + } + } + + @Override + public TypeConverter getTypeConverter( + OptionMetadata option, + ParseState state) + { + return converter; + } + + @Override + public TypeConverter getTypeConverter( + ArgumentsMetadata arguments, + ParseState state) + { + return converter; + } +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/OpenApiHttpProxyConfigGenerator.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/OpenApiHttpProxyConfigGenerator.java new file mode 100644 index 0000000000..352820f39a --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/OpenApiHttpProxyConfigGenerator.java @@ -0,0 +1,480 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.openapi.http.proxy; + +import static io.aklivity.zilla.runtime.binding.http.config.HttpPolicyConfig.CROSS_ORIGIN; +import static io.aklivity.zilla.runtime.engine.config.KindConfig.CLIENT; +import static io.aklivity.zilla.runtime.engine.config.KindConfig.SERVER; +import static java.util.Objects.requireNonNull; +import static org.agrona.LangUtil.rethrowUnchecked; + +import java.io.InputStream; +import java.net.URI; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.Pattern; + +import jakarta.json.Json; +import jakarta.json.JsonPatch; +import jakarta.json.JsonPatchBuilder; +import jakarta.json.bind.Jsonb; +import jakarta.json.bind.JsonbBuilder; + +import io.aklivity.zilla.runtime.binding.http.config.HttpConditionConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpOptionsConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpOptionsConfigBuilder; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpConditionConfig; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfig; +import io.aklivity.zilla.runtime.binding.tls.config.TlsOptionsConfig; +import io.aklivity.zilla.runtime.command.config.internal.airline.ConfigGenerator; +import io.aklivity.zilla.runtime.command.config.internal.openapi.model.OpenApi; +import io.aklivity.zilla.runtime.command.config.internal.openapi.model.Server; +import io.aklivity.zilla.runtime.command.config.internal.openapi.model2.PathItem2; +import 
io.aklivity.zilla.runtime.command.config.internal.openapi.model2.Server2; +import io.aklivity.zilla.runtime.engine.config.BindingConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.ConfigWriter; +import io.aklivity.zilla.runtime.engine.config.GuardedConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.NamespaceConfig; +import io.aklivity.zilla.runtime.engine.config.NamespaceConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.RouteConfigBuilder; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; +import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfig; + +public class OpenApiHttpProxyConfigGenerator implements ConfigGenerator +{ + private final InputStream inputStream; + + private OpenApi openApi; + private int[] allPorts; + private int[] httpPorts; + private int[] httpsPorts; + private boolean isPlainEnabled; + private boolean isTlsEnabled; + private Map securitySchemes; + private boolean isJwtEnabled; + + public OpenApiHttpProxyConfigGenerator( + InputStream inputStream) + { + this.inputStream = inputStream; + } + + public String generate() + { + this.openApi = parseOpenApi(inputStream); + this.allPorts = resolveAllPorts(); + this.httpPorts = resolvePortsForScheme("http"); + this.httpsPorts = resolvePortsForScheme("https"); + this.isPlainEnabled = httpPorts != null; + this.isTlsEnabled = httpsPorts != null; + this.securitySchemes = resolveSecuritySchemes(); + this.isJwtEnabled = !securitySchemes.isEmpty(); + ConfigWriter configWriter = new ConfigWriter(null); + String yaml = configWriter.write(createNamespace(), createEnvVarsPatch()); + return unquoteEnvVars(yaml); + } + + private OpenApi parseOpenApi( + InputStream inputStream) + { + OpenApi openApi = null; + try (Jsonb jsonb = JsonbBuilder.create()) + { + openApi = jsonb.fromJson(inputStream, OpenApi.class); + } + catch (Exception ex) + { + rethrowUnchecked(ex); + } + return openApi; + } + + private int[] resolveAllPorts() + { + int[] 
ports = new int[openApi.servers.size()]; + for (int i = 0; i < openApi.servers.size(); i++) + { + Server2 server2 = Server2.of(openApi.servers.get(i)); + URI url = server2.url(); + ports[i] = url.getPort(); + } + return ports; + } + + private int[] resolvePortsForScheme( + String scheme) + { + requireNonNull(scheme); + int[] ports = null; + URI url = findFirstServerUrlWithScheme(scheme); + if (url != null) + { + ports = new int[] {url.getPort()}; + } + return ports; + } + + private URI findFirstServerUrlWithScheme( + String scheme) + { + requireNonNull(scheme); + URI result = null; + for (Server server : openApi.servers) + { + Server2 server2 = Server2.of(server); + if (scheme.equals(server2.url().getScheme())) + { + result = server2.url(); + break; + } + } + return result; + } + + private Map resolveSecuritySchemes() + { + requireNonNull(openApi); + Map result = new HashMap<>(); + if (openApi.components != null && openApi.components.securitySchemes != null) + { + for (String securitySchemeName : openApi.components.securitySchemes.keySet()) + { + String guardType = openApi.components.securitySchemes.get(securitySchemeName).bearerFormat; + if ("jwt".equals(guardType)) + { + result.put(securitySchemeName, guardType); + } + } + } + return result; + } + + private NamespaceConfig createNamespace() + { + return NamespaceConfig.builder() + .name("example") + .binding() + .name("tcp_server0") + .type("tcp") + .kind(SERVER) + .options(TcpOptionsConfig::builder) + .host("0.0.0.0") + .ports(allPorts) + .build() + .inject(this::injectPlainTcpRoute) + .inject(this::injectTlsTcpRoute) + .build() + .inject(this::injectTlsServer) + .binding() + .name("http_server0") + .type("http") + .kind(SERVER) + .options(HttpOptionsConfig::builder) + .access() + .policy(CROSS_ORIGIN) + .build() + .inject(this::injectHttpServerOptions) + .build() + .inject(this::injectHttpServerRoutes) + .build() + .binding() + .name("http_client0") + .type("http") + .kind(CLIENT) + .exit(isTlsEnabled ? 
"tls_client0" : "tcp_client0") + .build() + .inject(this::injectTlsClient) + .binding() + .name("tcp_client0") + .type("tcp") + .kind(CLIENT) + .options(TcpOptionsConfig::builder) + .host("") // env + .ports(new int[]{0}) // env + .build() + .build() + .inject(this::injectGuard) + .inject(this::injectVaults) + .build(); + } + + private BindingConfigBuilder> injectPlainTcpRoute( + BindingConfigBuilder> binding) + { + if (isPlainEnabled) + { + binding + .route() + .when(TcpConditionConfig::builder) + .ports(httpPorts) + .build() + .exit("http_server0") + .build(); + } + return binding; + } + + private BindingConfigBuilder> injectTlsTcpRoute( + BindingConfigBuilder> binding) + { + if (isTlsEnabled) + { + binding + .route() + .when(TcpConditionConfig::builder) + .ports(httpsPorts) + .build() + .exit("tls_server0") + .build(); + } + return binding; + } + + private NamespaceConfigBuilder injectTlsServer( + NamespaceConfigBuilder namespace) + { + if (isTlsEnabled) + { + namespace + .binding() + .name("tls_server0") + .type("tls") + .kind(SERVER) + .options(TlsOptionsConfig::builder) + .keys(List.of("")) // env + .sni(List.of("")) // env + .alpn(List.of("")) // env + .build() + .vault("server") + .exit("http_server0") + .build(); + } + return namespace; + } + + private HttpOptionsConfigBuilder>> injectHttpServerOptions( + HttpOptionsConfigBuilder>> options) + { + if (isJwtEnabled) + { + options + .authorization() + .name("jwt0") + .credentials() + .header() + .name("authorization") + .pattern("Bearer {credentials}") + .build() + .build() + .build(); + } + return options; + } + + private BindingConfigBuilder> injectHttpServerRoutes( + BindingConfigBuilder> binding) + { + for (String path : openApi.paths.keySet()) + { + PathItem2 item = PathItem2.of(openApi.paths.get(path)); + for (String method : item.methods().keySet()) + { + binding + .route() + .exit("http_client0") + .when(HttpConditionConfig::builder) + .header(":path", path.replaceAll("\\{[^}]+\\}", "*")) + 
.header(":method", method) + .build() + .inject(route -> injectHttpServerRouteGuarded(route, item, method)) + .build(); + } + } + return binding; + } + + private RouteConfigBuilder>> injectHttpServerRouteGuarded( + RouteConfigBuilder>> route, + PathItem2 item, + String method) + { + List>> security = item.methods().get(method).security; + if (security != null) + { + for (Map> securityItem : security) + { + for (String securityItemLabel : securityItem.keySet()) + { + if (isJwtEnabled && "jwt".equals(securitySchemes.get(securityItemLabel))) + { + route + .guarded() + .name("jwt0") + .inject(guarded -> injectGuardedRoles(guarded, securityItem.get(securityItemLabel))) + .build(); + } + } + } + } + return route; + } + + private GuardedConfigBuilder injectGuardedRoles( + GuardedConfigBuilder guarded, + List roles) + { + for (String role : roles) + { + guarded.role(role); + } + return guarded; + } + + private NamespaceConfigBuilder injectTlsClient( + NamespaceConfigBuilder namespace) + { + if (isTlsEnabled) + { + namespace + .binding() + .name("tls_client0") + .type("tls") + .kind(CLIENT) + .options(TlsOptionsConfig::builder) + .trust(List.of("")) // env + .sni(List.of("")) // env + .alpn(List.of("")) // env + .trustcacerts(true) + .build() + .vault("client") + .exit("tcp_client0") + .build(); + } + return namespace; + } + + private NamespaceConfigBuilder injectGuard( + NamespaceConfigBuilder namespace) + { + if (isJwtEnabled) + { + namespace + .guard() + .name("jwt0") + .type("jwt") + .options(JwtOptionsConfig::builder) + .issuer("") // env + .audience("") // env + .key() + .alg("").kty("").kid("").use("").n("").e("").crv("").x("").y("") // env + .build() + .build() + .build(); + } + return namespace; + } + + private NamespaceConfigBuilder injectVaults( + NamespaceConfigBuilder namespace) + { + if (isTlsEnabled) + { + namespace + .vault() + .name("client") + .type("filesystem") + .options(FileSystemOptionsConfig::builder) + .trust() + .store("") // env + .type("") // env 
+ .password("") // env + .build() + .build() + .build() + .vault() + .name("server") + .type("filesystem") + .options(FileSystemOptionsConfig::builder) + .keys() + .store("") // env + .type("") // env + .password("") //env + .build() + .build() + .build(); + } + return namespace; + } + + private JsonPatch createEnvVarsPatch() + { + JsonPatchBuilder patch = Json.createPatchBuilder(); + patch.replace("/bindings/tcp_client0/options/host", "${{env.TCP_CLIENT_HOST}}"); + patch.replace("/bindings/tcp_client0/options/port", "${{env.TCP_CLIENT_PORT}}"); + + if (isJwtEnabled) + { + // jwt0 guard + patch.replace("/guards/jwt0/options/issuer", "${{env.JWT_ISSUER}}"); + patch.replace("/guards/jwt0/options/audience", "${{env.JWT_AUDIENCE}}"); + patch.replace("/guards/jwt0/options/keys/0/alg", "${{env.JWT_ALG}}"); + patch.replace("/guards/jwt0/options/keys/0/kty", "${{env.JWT_KTY}}"); + patch.replace("/guards/jwt0/options/keys/0/kid", "${{env.JWT_KID}}"); + patch.replace("/guards/jwt0/options/keys/0/use", "${{env.JWT_USE}}"); + patch.replace("/guards/jwt0/options/keys/0/n", "${{env.JWT_N}}"); + patch.replace("/guards/jwt0/options/keys/0/e", "${{env.JWT_E}}"); + patch.replace("/guards/jwt0/options/keys/0/crv", "${{env.JWT_CRV}}"); + patch.replace("/guards/jwt0/options/keys/0/x", "${{env.JWT_X}}"); + patch.replace("/guards/jwt0/options/keys/0/y", "${{env.JWT_Y}}"); + } + + if (isTlsEnabled) + { + // tls_server0 binding + patch.replace("/bindings/tls_server0/options/keys/0", "${{env.TLS_SERVER_KEYS}}"); + patch.replace("/bindings/tls_server0/options/sni/0", "${{env.TLS_SERVER_SNI}}"); + patch.replace("/bindings/tls_server0/options/alpn/0", "${{env.TLS_SERVER_ALPN}}"); + // tls_client0 binding + patch.replace("/bindings/tls_client0/options/trust/0", "${{env.TLS_CLIENT_TRUST}}"); + patch.replace("/bindings/tls_client0/options/sni/0", "${{env.TLS_CLIENT_SNI}}"); + patch.replace("/bindings/tls_client0/options/alpn/0", "${{env.TLS_CLIENT_ALPN}}"); + // client vault + 
patch.replace("/vaults/client/options/trust/store", "${{env.TRUSTSTORE_PATH}}"); + patch.replace("/vaults/client/options/trust/type", "${{env.TRUSTSTORE_TYPE}}"); + patch.replace("/vaults/client/options/trust/password", "${{env.TRUSTSTORE_PASSWORD}}"); + // server vault + patch.replace("/vaults/server/options/keys/store", "${{env.KEYSTORE_PATH}}"); + patch.replace("/vaults/server/options/keys/type", "${{env.KEYSTORE_TYPE}}"); + patch.replace("/vaults/server/options/keys/password", "${{env.KEYSTORE_PASSWORD}}"); + } + + return patch.build(); + } + + private String unquoteEnvVars( + String yaml) + { + List unquotedEnvVars = List.of("TCP_CLIENT_PORT"); + for (String envVar : unquotedEnvVars) + { + yaml = yaml.replaceAll( + Pattern.quote(String.format("\"${{env.%s}}\"", envVar)), + String.format("\\${{env.%s}}", envVar) + ); + } + return yaml; + } +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/BearerAuth.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/BearerAuth.java new file mode 100644 index 0000000000..cc5224c3ad --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/BearerAuth.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.openapi.model; + +public class BearerAuth +{ + public String bearerFormat; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/Components.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/Components.java new file mode 100644 index 0000000000..ba0ad10db6 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/Components.java @@ -0,0 +1,22 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.openapi.model; + +import java.util.Map; + +public class Components +{ + public Map securitySchemes; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/OpenApi.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/OpenApi.java new file mode 100644 index 0000000000..042f3173a5 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/OpenApi.java @@ -0,0 +1,26 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.openapi.model; + +import java.util.LinkedHashMap; +import java.util.List; + +public class OpenApi +{ + public String openapi; + public List servers; + public LinkedHashMap paths; + public Components components; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/Operation.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/Operation.java new file mode 100644 index 0000000000..c877e5dc85 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/Operation.java @@ -0,0 +1,23 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.openapi.model; + +import java.util.List; +import java.util.Map; + +public class Operation +{ + public List>> security; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/PathItem.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/PathItem.java new file mode 100644 index 0000000000..7235334d61 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/PathItem.java @@ -0,0 +1,27 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.openapi.model; + +public class PathItem +{ + public Operation get; + public Operation put; + public Operation post; + public Operation delete; + public Operation options; + public Operation head; + public Operation patch; + public Operation trace; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/SecurityScheme.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/SecurityScheme.java new file mode 100644 index 0000000000..31be12fcdb --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/SecurityScheme.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.openapi.model; + +public class SecurityScheme +{ + public String bearerFormat; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/Server.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/Server.java new file mode 100644 index 0000000000..0d9072e758 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model/Server.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.openapi.model; + +public class Server +{ + public String url; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/PathItem2.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/PathItem2.java new file mode 100644 index 0000000000..504ab65f23 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/PathItem2.java @@ -0,0 +1,62 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.openapi.model2; + +import java.util.LinkedHashMap; +import java.util.Map; + +import io.aklivity.zilla.runtime.command.config.internal.openapi.model.Operation; +import io.aklivity.zilla.runtime.command.config.internal.openapi.model.PathItem; + +public class PathItem2 +{ + private final LinkedHashMap methods; + + public PathItem2( + PathItem pathItem) + { + this.methods = new LinkedHashMap<>(); + putIfNotNull(methods, "GET", pathItem.get); + putIfNotNull(methods, "PUT", pathItem.put); + putIfNotNull(methods, "POST", pathItem.post); + putIfNotNull(methods, "DELETE", pathItem.delete); + putIfNotNull(methods, "OPTIONS", pathItem.options); + putIfNotNull(methods, "HEAD", pathItem.head); + putIfNotNull(methods, "PATCH", pathItem.patch); + putIfNotNull(methods, "TRACE", pathItem.trace); + } + + public Map methods() + { + return methods; + } + + public static PathItem2 of( + PathItem pathItem) + { + return new PathItem2(pathItem); + } + + private static void putIfNotNull( + Map methods, + String method, + Operation operation) + { + if (operation != null) + { + methods.put(method, operation); + } + } +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/Server2.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/Server2.java new file mode 100644 index 0000000000..c37692435b --- /dev/null +++ 
b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/Server2.java @@ -0,0 +1,41 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.openapi.model2; + +import java.net.URI; + +import io.aklivity.zilla.runtime.command.config.internal.openapi.model.Server; + +public final class Server2 +{ + private URI url; + + private Server2( + Server server) + { + this.url = URI.create(server.url); + } + + public URI url() + { + return url; + } + + public static Server2 of( + Server server) + { + return new Server2(server); + } +} diff --git a/incubator/command-config/src/main/moditect/module-info.java b/incubator/command-config/src/main/moditect/module-info.java new file mode 100644 index 0000000000..8885ef4672 --- /dev/null +++ b/incubator/command-config/src/main/moditect/module-info.java @@ -0,0 +1,32 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations under the License. + */ +module io.aklivity.zilla.runtime.command.config +{ + requires io.aklivity.zilla.runtime.command; + requires io.aklivity.zilla.runtime.engine; + requires io.aklivity.zilla.runtime.binding.http; + requires io.aklivity.zilla.runtime.binding.tcp; + requires io.aklivity.zilla.runtime.binding.tls; + requires io.aklivity.zilla.runtime.guard.jwt; + requires io.aklivity.zilla.runtime.vault.filesystem; + + opens io.aklivity.zilla.runtime.command.config.internal.airline + to com.github.rvesse.airline; + + opens io.aklivity.zilla.runtime.command.config.internal.openapi.model; + + provides io.aklivity.zilla.runtime.command.ZillaCommandSpi + with io.aklivity.zilla.runtime.command.config.internal.ZillaConfigCommandSpi; +} diff --git a/incubator/command-config/src/test/java/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/OpenApiHttpProxyConfigGeneratorTest.java b/incubator/command-config/src/test/java/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/OpenApiHttpProxyConfigGeneratorTest.java new file mode 100644 index 0000000000..dd56ca1e5e --- /dev/null +++ b/incubator/command-config/src/test/java/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/OpenApiHttpProxyConfigGeneratorTest.java @@ -0,0 +1,95 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.openapi.http.proxy; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; + +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import org.junit.jupiter.api.Test; + +public class OpenApiHttpProxyConfigGeneratorTest +{ + @Test + public void shouldGeneratePlainConfig() throws Exception + { + try (InputStream inputStream = getClass().getResourceAsStream("plain/openapi.yaml")) + { + // GIVEN + String expectedResult = Files.readString(Path.of(getClass().getResource("plain/zilla.yaml").getFile())); + OpenApiHttpProxyConfigGenerator generator = new OpenApiHttpProxyConfigGenerator(inputStream); + + // WHEN + String result = generator.generate(); + + // THEN + assertThat(result, equalTo(expectedResult)); + } + } + + @Test + public void shouldGenerateJwtConfig() throws Exception + { + try (InputStream inputStream = getClass().getResourceAsStream("jwt/openapi.yaml")) + { + // GIVEN + String expectedResult = Files.readString(Path.of(getClass().getResource("jwt/zilla.yaml").getFile())); + OpenApiHttpProxyConfigGenerator generator = new OpenApiHttpProxyConfigGenerator(inputStream); + + // WHEN + String result = generator.generate(); + + // THEN + assertThat(result, equalTo(expectedResult)); + } + } + + @Test + public void shouldGenerateTlsConfig() throws Exception + { + try (InputStream inputStream = getClass().getResourceAsStream("tls/openapi.yaml")) + { + // GIVEN + String expectedResult = Files.readString(Path.of(getClass().getResource("tls/zilla.yaml").getFile())); + OpenApiHttpProxyConfigGenerator generator = new OpenApiHttpProxyConfigGenerator(inputStream); + + // WHEN + String result = generator.generate(); + + // THEN + assertThat(result, equalTo(expectedResult)); + } + } + + @Test + public void shouldGenerateCompleteConfig() throws Exception + { + try (InputStream inputStream = 
getClass().getResourceAsStream("complete/openapi.yaml")) + { + // GIVEN + String expectedResult = Files.readString(Path.of(getClass().getResource("complete/zilla.yaml").getFile())); + OpenApiHttpProxyConfigGenerator generator = new OpenApiHttpProxyConfigGenerator(inputStream); + + // WHEN + String result = generator.generate(); + + // THEN + assertThat(result, equalTo(expectedResult)); + } + } +} diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/complete/openapi.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/complete/openapi.yaml new file mode 100644 index 0000000000..cfb9abf2fc --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/complete/openapi.yaml @@ -0,0 +1,169 @@ +openapi: 3.1.0 +hello: $.openapi +info: + version: 1.0.0 + title: Zilla CRUD V1 + license: + name: Aklivity Community License +servers: + - url: http://localhost:8080 + - url: https://localhost:9090 +paths: + /items: + post: + summary: Create an item + operationId: createItem + tags: + - items + requestBody: + content: + 'application/json': + schema: + $ref: '#/components/schemas/Item' + responses: + '204': + description: No Content + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + security: + - bearerAuth: + - create:items + get: + summary: List all items + operationId: listItems + tags: + - items + parameters: + - name: limit + in: query + description: How many items to return at one time (max 100) + required: false + schema: + type: integer + maximum: 100 + format: int32 + responses: + '200': + description: A paged array of items + headers: + x-next: + description: A link to the next page of responses + schema: + type: string + content: + application/json: + schema: + $ref: 
"#/components/schemas/Items" + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + security: + - bearerAuth: + - list:items + /items/{id}: + get: + summary: Get an item + operationId: showItemById + tags: + - items + parameters: + - name: id + in: path + required: true + description: The id of the item to retrieve + schema: + type: string + responses: + '200': + description: Expected response to a valid request + content: + application/json: + schema: + $ref: "#/components/schemas/Item" + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + put: + summary: Update an item by key + operationId: updateItem + tags: + - items + parameters: + - name: id + in: path + required: true + description: The id of the item to update + schema: + type: string + responses: + '204': + description: No Content + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + delete: + summary: Delete an item by key + operationId: deleteItem + tags: + - items + parameters: + - name: id + in: path + required: true + description: The id of the item to delete + schema: + type: string + responses: + '204': + description: No Content + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" +components: + schemas: + Item: + type: object + required: + - greeting + properties: + greeting: + type: string + tag: + type: string + Items: + type: array + maxItems: 100 + items: + $ref: "#/components/schemas/Item" + Error: + type: object + required: + - code + - message + properties: + code: + type: integer + format: int32 + message: + type: string + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: jwt diff --git 
a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/complete/zilla.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/complete/zilla.yaml new file mode 100644 index 0000000000..8d3a8b105e --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/complete/zilla.yaml @@ -0,0 +1,126 @@ +name: example +bindings: + tcp_server0: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: + - 8080 + - 9090 + routes: + - exit: http_server0 + when: + - port: 8080 + - exit: tls_server0 + when: + - port: 9090 + tls_server0: + vault: server + type: tls + kind: server + options: + keys: + - "${{env.TLS_SERVER_KEYS}}" + sni: + - "${{env.TLS_SERVER_SNI}}" + alpn: + - "${{env.TLS_SERVER_ALPN}}" + exit: http_server0 + http_server0: + type: http + kind: server + options: + access-control: + policy: cross-origin + authorization: + jwt0: + credentials: + headers: + authorization: "Bearer {credentials}" + routes: + - exit: http_client0 + when: + - headers: + :path: /items + :method: GET + guarded: + jwt0: + - list:items + - exit: http_client0 + when: + - headers: + :path: /items + :method: POST + guarded: + jwt0: + - create:items + - exit: http_client0 + when: + - headers: + :path: /items/* + :method: GET + - exit: http_client0 + when: + - headers: + :path: /items/* + :method: PUT + - exit: http_client0 + when: + - headers: + :path: /items/* + :method: DELETE + http_client0: + type: http + kind: client + exit: tls_client0 + tls_client0: + vault: client + type: tls + kind: client + options: + trust: + - "${{env.TLS_CLIENT_TRUST}}" + trustcacerts: true + sni: + - "${{env.TLS_CLIENT_SNI}}" + alpn: + - "${{env.TLS_CLIENT_ALPN}}" + exit: tcp_client0 + tcp_client0: + type: tcp + kind: client + options: + host: "${{env.TCP_CLIENT_HOST}}" + port: ${{env.TCP_CLIENT_PORT}} +guards: + jwt0: 
+ type: jwt + options: + issuer: "${{env.JWT_ISSUER}}" + audience: "${{env.JWT_AUDIENCE}}" + keys: + - kty: "${{env.JWT_KTY}}" + "n": "${{env.JWT_N}}" + e: "${{env.JWT_E}}" + alg: "${{env.JWT_ALG}}" + crv: "${{env.JWT_CRV}}" + x: "${{env.JWT_X}}" + "y": "${{env.JWT_Y}}" + use: "${{env.JWT_USE}}" + kid: "${{env.JWT_KID}}" +vaults: + client: + type: filesystem + options: + trust: + store: "${{env.TRUSTSTORE_PATH}}" + type: "${{env.TRUSTSTORE_TYPE}}" + password: "${{env.TRUSTSTORE_PASSWORD}}" + server: + type: filesystem + options: + keys: + store: "${{env.KEYSTORE_PATH}}" + type: "${{env.KEYSTORE_TYPE}}" + password: "${{env.KEYSTORE_PASSWORD}}" diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/jwt/openapi.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/jwt/openapi.yaml new file mode 100644 index 0000000000..3a7d12a913 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/jwt/openapi.yaml @@ -0,0 +1,168 @@ +openapi: 3.1.0 +hello: $.openapi +info: + version: 1.0.0 + title: Zilla CRUD V1 + license: + name: Aklivity Community License +servers: + - url: http://localhost:8080 +paths: + /items: + post: + summary: Create an item + operationId: createItem + tags: + - items + requestBody: + content: + 'application/json': + schema: + $ref: '#/components/schemas/Item' + responses: + '204': + description: No Content + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + security: + - bearerAuth: + - create:items + get: + summary: List all items + operationId: listItems + tags: + - items + parameters: + - name: limit + in: query + description: How many items to return at one time (max 100) + required: false + schema: + type: integer + maximum: 100 + format: int32 + responses: + '200': + 
description: A paged array of items + headers: + x-next: + description: A link to the next page of responses + schema: + type: string + content: + application/json: + schema: + $ref: "#/components/schemas/Items" + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + security: + - bearerAuth: + - list:items + /items/{id}: + get: + summary: Get an item + operationId: showItemById + tags: + - items + parameters: + - name: id + in: path + required: true + description: The id of the item to retrieve + schema: + type: string + responses: + '200': + description: Expected response to a valid request + content: + application/json: + schema: + $ref: "#/components/schemas/Item" + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + put: + summary: Update an item by key + operationId: updateItem + tags: + - items + parameters: + - name: id + in: path + required: true + description: The id of the item to update + schema: + type: string + responses: + '204': + description: No Content + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + delete: + summary: Delete an item by key + operationId: deleteItem + tags: + - items + parameters: + - name: id + in: path + required: true + description: The id of the item to delete + schema: + type: string + responses: + '204': + description: No Content + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" +components: + schemas: + Item: + type: object + required: + - greeting + properties: + greeting: + type: string + tag: + type: string + Items: + type: array + maxItems: 100 + items: + $ref: "#/components/schemas/Item" + Error: + type: object + required: + - code + - message + properties: + code: + type: integer + format: int32 + message: + type: string + securitySchemes: + 
bearerAuth: + type: http + scheme: bearer + bearerFormat: jwt diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/jwt/zilla.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/jwt/zilla.yaml new file mode 100644 index 0000000000..21fb5261cb --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/jwt/zilla.yaml @@ -0,0 +1,81 @@ +name: example +bindings: + tcp_server0: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: 8080 + routes: + - exit: http_server0 + when: + - port: 8080 + http_server0: + type: http + kind: server + options: + access-control: + policy: cross-origin + authorization: + jwt0: + credentials: + headers: + authorization: "Bearer {credentials}" + routes: + - exit: http_client0 + when: + - headers: + :path: /items + :method: GET + guarded: + jwt0: + - list:items + - exit: http_client0 + when: + - headers: + :path: /items + :method: POST + guarded: + jwt0: + - create:items + - exit: http_client0 + when: + - headers: + :path: /items/* + :method: GET + - exit: http_client0 + when: + - headers: + :path: /items/* + :method: PUT + - exit: http_client0 + when: + - headers: + :path: /items/* + :method: DELETE + http_client0: + type: http + kind: client + exit: tcp_client0 + tcp_client0: + type: tcp + kind: client + options: + host: "${{env.TCP_CLIENT_HOST}}" + port: ${{env.TCP_CLIENT_PORT}} +guards: + jwt0: + type: jwt + options: + issuer: "${{env.JWT_ISSUER}}" + audience: "${{env.JWT_AUDIENCE}}" + keys: + - kty: "${{env.JWT_KTY}}" + "n": "${{env.JWT_N}}" + e: "${{env.JWT_E}}" + alg: "${{env.JWT_ALG}}" + crv: "${{env.JWT_CRV}}" + x: "${{env.JWT_X}}" + "y": "${{env.JWT_Y}}" + use: "${{env.JWT_USE}}" + kid: "${{env.JWT_KID}}" diff --git 
a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/plain/openapi.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/plain/openapi.yaml new file mode 100644 index 0000000000..712761e416 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/plain/openapi.yaml @@ -0,0 +1,157 @@ +openapi: 3.1.0 +hello: $.openapi +info: + version: 1.0.0 + title: Zilla CRUD V1 + license: + name: Aklivity Community License +servers: + - url: http://localhost:8080 +paths: + /items: + post: + summary: Create an item + operationId: createItem + tags: + - items + requestBody: + content: + 'application/json': + schema: + $ref: '#/components/schemas/Item' + responses: + '204': + description: No Content + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + get: + summary: List all items + operationId: listItems + tags: + - items + parameters: + - name: limit + in: query + description: How many items to return at one time (max 100) + required: false + schema: + type: integer + maximum: 100 + format: int32 + responses: + '200': + description: A paged array of items + headers: + x-next: + description: A link to the next page of responses + schema: + type: string + content: + application/json: + schema: + $ref: "#/components/schemas/Items" + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + /items/{id}: + get: + summary: Get an item + operationId: showItemById + tags: + - items + parameters: + - name: id + in: path + required: true + description: The id of the item to retrieve + schema: + type: string + responses: + '200': + description: Expected response to a valid request + content: + application/json: + schema: + $ref: "#/components/schemas/Item" + 
default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + put: + summary: Update an item by key + operationId: updateItem + tags: + - items + parameters: + - name: id + in: path + required: true + description: The id of the item to update + schema: + type: string + responses: + '204': + description: No Content + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + delete: + summary: Delete an item by key + operationId: deleteItem + tags: + - items + parameters: + - name: id + in: path + required: true + description: The id of the item to delete + schema: + type: string + responses: + '204': + description: No Content + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" +components: + schemas: + Item: + type: object + required: + - greeting + properties: + greeting: + type: string + tag: + type: string + Items: + type: array + maxItems: 100 + items: + $ref: "#/components/schemas/Item" + Error: + type: object + required: + - code + - message + properties: + code: + type: integer + format: int32 + message: + type: string diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/plain/zilla.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/plain/zilla.yaml new file mode 100644 index 0000000000..057c2a4c62 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/plain/zilla.yaml @@ -0,0 +1,54 @@ +name: example +bindings: + tcp_server0: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: 8080 + routes: + - exit: http_server0 + when: + - port: 8080 + http_server0: + type: http + kind: server + options: + access-control: + policy: cross-origin + routes: + - 
exit: http_client0 + when: + - headers: + :path: /items + :method: GET + - exit: http_client0 + when: + - headers: + :path: /items + :method: POST + - exit: http_client0 + when: + - headers: + :path: /items/* + :method: GET + - exit: http_client0 + when: + - headers: + :path: /items/* + :method: PUT + - exit: http_client0 + when: + - headers: + :path: /items/* + :method: DELETE + http_client0: + type: http + kind: client + exit: tcp_client0 + tcp_client0: + type: tcp + kind: client + options: + host: "${{env.TCP_CLIENT_HOST}}" + port: ${{env.TCP_CLIENT_PORT}} diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/tls/openapi.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/tls/openapi.yaml new file mode 100644 index 0000000000..53b1c6d2b9 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/tls/openapi.yaml @@ -0,0 +1,157 @@ +openapi: 3.1.0 +hello: $.openapi +info: + version: 1.0.0 + title: Zilla CRUD V1 + license: + name: Aklivity Community License +servers: + - url: https://localhost:9090 +paths: + /items: + post: + summary: Create an item + operationId: createItem + tags: + - items + requestBody: + content: + 'application/json': + schema: + $ref: '#/components/schemas/Item' + responses: + '204': + description: No Content + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + get: + summary: List all items + operationId: listItems + tags: + - items + parameters: + - name: limit + in: query + description: How many items to return at one time (max 100) + required: false + schema: + type: integer + maximum: 100 + format: int32 + responses: + '200': + description: A paged array of items + headers: + x-next: + description: A link to the next page of responses + schema: + type: string 
+ content: + application/json: + schema: + $ref: "#/components/schemas/Items" + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + /items/{id}: + get: + summary: Get an item + operationId: showItemById + tags: + - items + parameters: + - name: id + in: path + required: true + description: The id of the item to retrieve + schema: + type: string + responses: + '200': + description: Expected response to a valid request + content: + application/json: + schema: + $ref: "#/components/schemas/Item" + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + put: + summary: Update an item by key + operationId: updateItem + tags: + - items + parameters: + - name: id + in: path + required: true + description: The id of the item to update + schema: + type: string + responses: + '204': + description: No Content + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + delete: + summary: Delete an item by key + operationId: deleteItem + tags: + - items + parameters: + - name: id + in: path + required: true + description: The id of the item to delete + schema: + type: string + responses: + '204': + description: No Content + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" +components: + schemas: + Item: + type: object + required: + - greeting + properties: + greeting: + type: string + tag: + type: string + Items: + type: array + maxItems: 100 + items: + $ref: "#/components/schemas/Item" + Error: + type: object + required: + - code + - message + properties: + code: + type: integer + format: int32 + message: + type: string diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/tls/zilla.yaml 
b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/tls/zilla.yaml new file mode 100644 index 0000000000..492f19f16c --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/tls/zilla.yaml @@ -0,0 +1,94 @@ +name: example +bindings: + tcp_server0: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: 9090 + routes: + - exit: tls_server0 + when: + - port: 9090 + tls_server0: + vault: server + type: tls + kind: server + options: + keys: + - "${{env.TLS_SERVER_KEYS}}" + sni: + - "${{env.TLS_SERVER_SNI}}" + alpn: + - "${{env.TLS_SERVER_ALPN}}" + exit: http_server0 + http_server0: + type: http + kind: server + options: + access-control: + policy: cross-origin + routes: + - exit: http_client0 + when: + - headers: + :path: /items + :method: GET + - exit: http_client0 + when: + - headers: + :path: /items + :method: POST + - exit: http_client0 + when: + - headers: + :path: /items/* + :method: GET + - exit: http_client0 + when: + - headers: + :path: /items/* + :method: PUT + - exit: http_client0 + when: + - headers: + :path: /items/* + :method: DELETE + http_client0: + type: http + kind: client + exit: tls_client0 + tls_client0: + vault: client + type: tls + kind: client + options: + trust: + - "${{env.TLS_CLIENT_TRUST}}" + trustcacerts: true + sni: + - "${{env.TLS_CLIENT_SNI}}" + alpn: + - "${{env.TLS_CLIENT_ALPN}}" + exit: tcp_client0 + tcp_client0: + type: tcp + kind: client + options: + host: "${{env.TCP_CLIENT_HOST}}" + port: ${{env.TCP_CLIENT_PORT}} +vaults: + client: + type: filesystem + options: + trust: + store: "${{env.TRUSTSTORE_PATH}}" + type: "${{env.TRUSTSTORE_TYPE}}" + password: "${{env.TRUSTSTORE_PASSWORD}}" + server: + type: filesystem + options: + keys: + store: "${{env.KEYSTORE_PATH}}" + type: "${{env.KEYSTORE_TYPE}}" + password: "${{env.KEYSTORE_PASSWORD}}" diff --git a/incubator/pom.xml b/incubator/pom.xml 
index de37e6d031..12a909c7b3 100644 --- a/incubator/pom.xml +++ b/incubator/pom.xml @@ -29,6 +29,7 @@ command-log command-dump command-tune + command-config exporter-otlp @@ -65,6 +66,11 @@ command-dump ${project.version}
+ + ${project.groupId} + command-config + ${project.version} + ${project.groupId} exporter-otlp diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigWriter.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigWriter.java index ce08b2acae..dd45f7abd5 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigWriter.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/config/ConfigWriter.java @@ -19,11 +19,16 @@ import static com.fasterxml.jackson.dataformat.yaml.YAMLGenerator.Feature.WRITE_DOC_START_MARKER; import static org.agrona.LangUtil.rethrowUnchecked; +import java.io.StringReader; import java.io.StringWriter; import java.io.Writer; import java.util.LinkedList; import java.util.List; +import jakarta.json.JsonObject; +import jakarta.json.JsonPatch; +import jakarta.json.JsonValue; +import jakarta.json.JsonWriter; import jakarta.json.bind.Jsonb; import jakarta.json.bind.JsonbBuilder; import jakarta.json.bind.JsonbConfig; @@ -37,6 +42,8 @@ public final class ConfigWriter { + private static final JsonPatch NOOP_PATCH = JsonProvider.provider().createPatch(JsonValue.EMPTY_JSON_ARRAY); + private final ConfigAdapterContext context; public ConfigWriter( @@ -49,20 +56,38 @@ public void write( NamespaceConfig namespace, Writer writer) { - write0(namespace, writer); + write0(namespace, writer, NOOP_PATCH); + } + + public void write( + NamespaceConfig namespace, + Writer writer, + JsonPatch patch) + { + write0(namespace, writer, patch); } public String write( NamespaceConfig namespace) { StringWriter writer = new StringWriter(); - write0(namespace, writer); + write0(namespace, writer, NOOP_PATCH); + return writer.toString(); + } + + public String write( + NamespaceConfig namespace, + JsonPatch patch) + { + StringWriter writer = new StringWriter(); + write0(namespace, writer, patch); return writer.toString(); } private void write0( NamespaceConfig namespace, - 
Writer writer) + Writer writer, + JsonPatch patch) { List errors = new LinkedList<>(); @@ -79,9 +104,16 @@ private void write0( .withProvider(provider) .withConfig(config) .build(); - String jsonText = jsonb.toJson(namespace, NamespaceConfig.class); - JsonNode json = new ObjectMapper().readTree(jsonText); + + JsonObject jsonObject = provider.createReader(new StringReader(jsonText)).readObject(); + JsonObject patched = patch.apply(jsonObject); + StringWriter patchedText = new StringWriter(); + JsonWriter jsonWriter = provider.createWriter(patchedText); + jsonWriter.write(patched); + String patchedJson = patchedText.toString(); + + JsonNode json = new ObjectMapper().readTree(patchedJson); YAMLMapper mapper = YAMLMapper.builder() .disable(WRITE_DOC_START_MARKER) .enable(MINIMIZE_QUOTES) diff --git a/runtime/engine/src/main/moditect/module-info.java b/runtime/engine/src/main/moditect/module-info.java index 489e9acb67..d3db1ae724 100644 --- a/runtime/engine/src/main/moditect/module-info.java +++ b/runtime/engine/src/main/moditect/module-info.java @@ -39,6 +39,7 @@ requires transitive org.agrona.core; requires org.leadpony.justify; requires com.fasterxml.jackson.dataformat.yaml; + requires com.fasterxml.jackson.databind; requires jdk.unsupported; requires java.net.http; diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java index 321da79077..71498f7cc2 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/config/ConfigWriterTest.java @@ -22,6 +22,9 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; +import jakarta.json.Json; +import jakarta.json.JsonPatch; + import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -52,6 +55,7 @@ public void initYaml() @Test 
public void shouldWriteNamespace() { + // GIVEN NamespaceConfig config = NamespaceConfig.builder() .name("test") .binding() @@ -74,8 +78,10 @@ public void shouldWriteNamespace() .build() .build(); + // WHEN String text = yaml.write(config); + // THEN assertThat(text, not(nullValue())); assertThat(text, equalTo(String.join("\n", new String[] { @@ -93,4 +99,51 @@ public void shouldWriteNamespace() "" }))); } + + @Test + public void shouldPatchAndWriteNamespace() + { + // GIVEN + NamespaceConfig config = NamespaceConfig.builder() + .name("test") + .binding() + .name("test0") + .type("test") + .kind(SERVER) + .options(TestBindingOptionsConfig::builder) + .mode("test") + .build() + .route() + .when(TestConditionConfig::builder) + .match("test") + .build() + .exit("exit0") + .build() + .build() + .build(); + JsonPatch patch = Json.createPatchBuilder() + .replace("/bindings/test0/type", "newType") + .build(); + + // WHEN + String text = yaml.write(config, patch); + + // THEN + assertThat(text, not(nullValue())); + assertThat(text, equalTo(String.join("\n", + new String[] { + "name: test", + "bindings:", + " test0:", + " type: newType", + " kind: server", + " options:", + " mode: test", + " routes:", + " - exit: exit0", + " when:", + " - match: test", + "" + }))); + } } From 86794952371d4cb412fa2b5e513bc5d95e27806f Mon Sep 17 00:00:00 2001 From: John Fallows Date: Wed, 30 Aug 2023 15:56:37 -0700 Subject: [PATCH 067/115] Support configuration property definitions for custom type with constant default value (#382) --- .../amqp/internal/AmqpConfiguration.java | 2 +- .../internal/FileSystemConfiguration.java | 6 ++--- .../kafka/internal/KafkaConfiguration.java | 25 ++++++++----------- .../zilla/runtime/engine/Configuration.java | 16 ++++++++++-- 4 files changed, 29 insertions(+), 20 deletions(-) diff --git a/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/AmqpConfiguration.java 
b/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/AmqpConfiguration.java index dc8cb66592..47a5e38fb0 100644 --- a/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/AmqpConfiguration.java +++ b/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/AmqpConfiguration.java @@ -46,7 +46,7 @@ public class AmqpConfiguration extends Configuration AMQP_INITIAL_DEVIVERY_COUNT = config.property("initial.delivery.count", 0L); AMQP_CLOSE_EXCHANGE_TIMEOUT = config.property("close.exchange.timeout", 10000); AMQP_INCOMING_LOCALES = config.property(String[].class, "incoming.locales", - s -> s.split("\\s+"), c -> AMQP_INCOMING_LOCALES_DEFAULT); + s -> s.split("\\s+"), AMQP_INCOMING_LOCALES_DEFAULT); AMQP_CONFIG = config; } diff --git a/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/FileSystemConfiguration.java b/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/FileSystemConfiguration.java index 9f38a4e9c8..c9f1e7adc4 100644 --- a/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/FileSystemConfiguration.java +++ b/runtime/binding-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/filesystem/internal/FileSystemConfiguration.java @@ -30,8 +30,8 @@ public class FileSystemConfiguration extends Configuration static { final ConfigurationDef config = new ConfigurationDef(String.format("zilla.binding.%s", NAME)); - FILE_SYSTEM_SERVER_ROOT = - config.property(URI.class, "server.root", FileSystemConfiguration::createURI, p -> createURI(".")); + FILE_SYSTEM_SERVER_ROOT = config.property(URI.class, "server.root", + FileSystemConfiguration::decodeServerRoot, new File(".").toURI()); FILE_SYSTEM_CONFIG = config; } @@ -47,7 +47,7 @@ public URI serverRoot() return FILE_SYSTEM_SERVER_ROOT.get(this); } - private static URI createURI( + private static URI 
decodeServerRoot( String location) { return location.indexOf(':') != -1 ? URI.create(location) : new File(location).toURI(); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java index 54dffe1320..6ddfd3582d 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java @@ -75,6 +75,9 @@ public class KafkaConfiguration extends Configuration static { final ConfigurationDef config = new ConfigurationDef("zilla.binding.kafka"); + KAFKA_CLIENT_ID = config.property("client.id", "zilla"); + KAFKA_CLIENT_INSTANCE_ID = config.property(InstanceIdSupplier.class, "client.instance.id", + KafkaConfiguration::decodeInstanceId, KafkaConfiguration::defaultInstanceId); KAFKA_CLIENT_MAX_IDLE_MILLIS = config.property("client.max.idle.ms", 1 * 60 * 1000); KAFKA_CLIENT_META_MAX_AGE_MILLIS = config.property("client.meta.max.age.ms", 5 * 60 * 1000); KAFKA_CLIENT_DESCRIBE_MAX_AGE_MILLIS = config.property("client.describe.max.age.ms", 5 * 60 * 1000); @@ -84,6 +87,10 @@ public class KafkaConfiguration extends Configuration KAFKA_CLIENT_PRODUCE_MAX_REQUEST_MILLIS = config.property("client.produce.max.request.millis", 0); KAFKA_CLIENT_PRODUCE_MAX_RESPONSE_MILLIS = config.property("client.produce.max.response.millis", 120000); KAFKA_CLIENT_PRODUCE_MAX_BYTES = config.property("client.produce.max.bytes", Integer.MAX_VALUE); + KAFKA_CLIENT_SASL_SCRAM_NONCE = config.property(NonceSupplier.class, "client.sasl.scram.nonce", + KafkaConfiguration::decodeNonceSupplier, KafkaConfiguration::defaultNonceSupplier); + KAFKA_CLIENT_GROUP_REBALANCE_TIMEOUT = config.property(Duration.class, "client.group.rebalance.timeout", + (c, v) -> Duration.parse(v), "PT4S"); 
KAFKA_CACHE_DIRECTORY = config.property(Path.class, "cache.directory", KafkaConfiguration::cacheDirectory, KafkaBinding.NAME); KAFKA_CACHE_SERVER_BOOTSTRAP = config.property("cache.server.bootstrap", true); @@ -104,13 +111,6 @@ public class KafkaConfiguration extends Configuration KAFKA_CACHE_SEGMENT_BYTES = config.property("cache.segment.bytes", 0x40000000); KAFKA_CACHE_SEGMENT_INDEX_BYTES = config.property("cache.segment.index.bytes", 0xA00000); KAFKA_CACHE_CLIENT_TRAILERS_SIZE_MAX = config.property("cache.client.trailers.size.max", 256); - KAFKA_CLIENT_SASL_SCRAM_NONCE = config.property(NonceSupplier.class, "client.sasl.scram.nonce", - KafkaConfiguration::decodeNonceSupplier, KafkaConfiguration::defaultNonceSupplier); - KAFKA_CLIENT_GROUP_REBALANCE_TIMEOUT = config.property(Duration.class, "client.group.rebalance.timeout", - (c, v) -> Duration.parse(v), "PT4S"); - KAFKA_CLIENT_ID = config.property("client.id", "zilla"); - KAFKA_CLIENT_INSTANCE_ID = config.property(InstanceIdSupplier.class, "client.instance.id", - KafkaConfiguration::decodeInstanceId, KafkaConfiguration::defaultInstanceId); KAFKA_CONFIG = config; } @@ -300,8 +300,7 @@ private interface NonceSupplier } private static NonceSupplier decodeNonceSupplier( - Configuration config, - String value) + String value) { NonceSupplier supplier = null; @@ -335,15 +334,13 @@ private static NonceSupplier decodeNonceSupplier( return supplier; } - private static NonceSupplier defaultNonceSupplier( - Configuration config) + private static String defaultNonceSupplier() { - return () -> - new BigInteger(130, new SecureRandom()).toString(Character.MAX_RADIX); + return new BigInteger(130, new SecureRandom()).toString(Character.MAX_RADIX); } @FunctionalInterface - public interface InstanceIdSupplier extends Supplier + private interface InstanceIdSupplier extends Supplier { } diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/Configuration.java 
b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/Configuration.java index 3879d5fcd9..5be98c3238 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/Configuration.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/Configuration.java @@ -214,14 +214,14 @@ public CharPropertyDef property( public PropertyDef property( String name) { - return property(String.class, name, identity(), c -> null); + return property(String.class, name, identity(), (String) null); } public PropertyDef property( String name, String defaultValue) { - return property(String.class, name, identity(), c -> defaultValue); + return property(String.class, name, identity(), defaultValue); } public PropertyDef property( @@ -231,6 +231,18 @@ public PropertyDef property( return property(String.class, name, identity(), defaultValue); } + public PropertyDef property( + Class kind, + String name, + Function decodeValue, + T defaultValue) + { + String qualifiedName = qualifiedName(name); + PropertyDef property = new ObjectPropertyDef(kind, qualifiedName, decodeValue, defaultValue); + properties.put(qualifiedName, property); + return property; + } + public PropertyDef property( Class kind, String name, From ce995b5fc8c20840f44f13818d53a4dfcad73dea Mon Sep 17 00:00:00 2001 From: bmaidics Date: Thu, 31 Aug 2023 15:37:02 +0200 Subject: [PATCH 068/115] Fix mqtt-kafka publish bug (#383) --- .../kafka/publish.multiple.clients/client.rpt | 147 ++++++++++++++++++ .../kafka/publish.multiple.clients/server.rpt | 139 +++++++++++++++++ .../mqtt/publish.multiple.clients/client.rpt | 104 +++++++++++++ .../mqtt/publish.multiple.clients/server.rpt | 96 ++++++++++++ .../binding/mqtt/kafka/streams/KafkaIT.java | 9 ++ .../binding/mqtt/kafka/streams/MqttIT.java | 9 ++ .../stream/MqttKafkaPublishFactory.java | 42 ++--- .../stream/MqttKafkaPublishProxyIT.java | 10 ++ 8 files changed, 538 insertions(+), 18 deletions(-) create mode 100644 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt new file mode 100644 index 0000000000..544185a2f2 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt @@ -0,0 +1,147 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write notify CLIENT1_CONNECTED +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client-1") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client-1") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client-1") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message3" +write flush + + +connect await CLIENT1_CONNECTED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") + .header("zilla:local", "client-2") + .header("zilla:format", "TEXT") 
+ .build() + .build()} + +write "message1" +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") + .header("zilla:local", "client-2") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message2" +write flush + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") + .header("zilla:local", "client-2") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "message3" +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt new file mode 100644 index 0000000000..0ca6e64b9e --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt @@ -0,0 +1,139 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client-1") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client-1") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client-1") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message3" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") + .header("zilla:local", "client-2") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message1" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) 
+ .key("sensor/two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") + .header("zilla:local", "client-2") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message2" + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("sensor/two") + .header("zilla:filter", "sensor") + .header("zilla:filter", "two") + .header("zilla:local", "client-2") + .header("zilla:format", "TEXT") + .build() + .build()} + +read "message3" + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/client.rpt new file mode 100644 index 0000000000..bf7b5195af --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/client.rpt @@ -0,0 +1,104 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client-1") + .topic("sensor/one") + .build() + .build()} + +connected + +write notify RECEIVED_REPLY_BEGIN +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +write "message1" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +write "message2" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +write "message3" +write flush + +connect await RECEIVED_REPLY_BEGIN + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client-2") + .topic("sensor/two") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +write "message1" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +write "message2" +write flush + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +write "message3" +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/server.rpt new file mode 100644 index 0000000000..a49dd2795d --- /dev/null +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/server.rpt @@ -0,0 +1,96 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client-1") + .topic("sensor/one") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +read "message1" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +read "message2" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +read "message3" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client-2") + .topic("sensor/two") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +read "message1" + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +read "message2" + +read zilla:data.ext ${mqtt:matchDataEx() + 
.typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} + +read "message3" diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java index cf100bd1f3..c9119665fb 100644 --- a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java +++ b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java @@ -170,6 +170,15 @@ public void shouldSendMultipleMessages() throws Exception k3po.finish(); } + @Test + @Specification({ + "${kafka}/publish.multiple.clients/client", + "${kafka}/publish.multiple.clients/server"}) + public void shouldSendMultipleClients() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${kafka}/publish.with.user.properties.distinct/client", diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java index 65827e7622..5e5c07f6ff 100644 --- a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java +++ b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java @@ -116,6 +116,15 @@ public void shouldSendMultipleMessages() throws Exception k3po.finish(); } + @Test + @Specification({ + "${mqtt}/publish.multiple.clients/client", + "${mqtt}/publish.multiple.clients/server"}) + public void shouldSendMultipleClients() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${mqtt}/publish.retained/client", diff --git 
a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java index bcba510b97..0499a5b37e 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java @@ -20,6 +20,7 @@ import java.util.function.LongFunction; import java.util.function.LongUnaryOperator; +import org.agrona.BitUtil; import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; import org.agrona.concurrent.UnsafeBuffer; @@ -43,7 +44,6 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.BeginFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.DataFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.EndFW; -import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.ExtensionFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.FlushFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaDataExFW; @@ -62,7 +62,6 @@ public class MqttKafkaPublishFactory implements MqttKafkaStreamFactory { private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0); private static final KafkaAckMode KAFKA_DEFAULT_ACK_MODE = KafkaAckMode.LEADER_ONLY; - private static final String MQTT_TYPE_NAME = "mqtt"; private static final String KAFKA_TYPE_NAME = "kafka"; private static final byte SLASH_BYTE = (byte) '/'; @@ -85,18 +84,15 @@ public class MqttKafkaPublishFactory implements MqttKafkaStreamFactory private final 
WindowFW.Builder windowRW = new WindowFW.Builder(); private final ResetFW.Builder resetRW = new ResetFW.Builder(); - private final ExtensionFW extensionRO = new ExtensionFW(); private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); private final MqttDataExFW mqttDataExRO = new MqttDataExFW(); - private final KafkaDataExFW kafkaDataExRO = new KafkaDataExFW(); - - private final MqttDataExFW.Builder mqttDataExRW = new MqttDataExFW.Builder(); private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder(); private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); private final Array32FW.Builder kafkaHeadersRW = new Array32FW.Builder<>(new KafkaHeaderFW.Builder(), new KafkaHeaderFW()); + private final MutableDirectBuffer writeBuffer; private final MutableDirectBuffer extBuffer; private final MutableDirectBuffer kafkaHeadersBuffer; @@ -104,7 +100,6 @@ public class MqttKafkaPublishFactory implements MqttKafkaStreamFactory private final LongUnaryOperator supplyInitialId; private final LongUnaryOperator supplyReplyId; private final MqttKafkaHeaderHelper helper; - private final int mqttTypeId; private final int kafkaTypeId; private final LongFunction supplyBinding; private final String16FW binaryFormat; @@ -115,7 +110,6 @@ public MqttKafkaPublishFactory( EngineContext context, LongFunction supplyBinding) { - this.mqttTypeId = context.supplyTypeId(MQTT_TYPE_NAME); this.kafkaTypeId = context.supplyTypeId(KAFKA_TYPE_NAME); this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); @@ -183,7 +177,7 @@ private final class MqttPublishProxy private KafkaKeyFW key; - private OctetsFW[] topicNameHeaders; + private Array32FW topicNameHeaders; private OctetsFW clientIdOctets; private boolean retainAvailable; @@ -269,17 +263,32 @@ private void 
onMqttBegin( String topicName = mqttPublishBeginEx.topic().asString(); assert topicName != null; + + final String16FW clientId = mqttPublishBeginEx.clientId(); + final MutableDirectBuffer clientIdBuffer = new UnsafeBuffer(new byte[clientId.sizeof() + 2]); + this.clientIdOctets = new OctetsFW.Builder().wrap(clientIdBuffer, 0, clientIdBuffer.capacity()) + .set(clientId.value(), 0, mqttPublishBeginEx.clientId().length()).build(); + String[] topicHeaders = topicName.split("/"); - topicNameHeaders = new OctetsFW[topicHeaders.length]; + final OctetsFW[] topicNameHeaders = new OctetsFW[topicHeaders.length]; + + final int topicNameHeadersBufferSize = topicName.length() - (topicNameHeaders.length - 1) + + topicNameHeaders.length * 2 + BitUtil.SIZE_OF_INT + BitUtil.SIZE_OF_INT; //Array32FW count, length + final MutableDirectBuffer topicNameHeadersBuffer = new UnsafeBuffer(new byte[topicNameHeadersBufferSize]); + + final Array32FW.Builder topicNameHeadersRW = + new Array32FW.Builder<>(new String16FW.Builder(), new String16FW()); + topicNameHeadersRW.wrap(topicNameHeadersBuffer, 0, topicNameHeadersBuffer.capacity()); + for (int i = 0; i < topicHeaders.length; i++) { String16FW topicHeader = new String16FW(topicHeaders[i]); - topicNameHeaders[i] = new OctetsFW().wrap(topicHeader.value(), 0, topicHeader.length()); + topicNameHeadersRW.item(h -> h.set(topicHeader)); } - clientIdOctets = new OctetsFW() - .wrap(mqttPublishBeginEx.clientId().value(), 0, mqttPublishBeginEx.clientId().length()); - final DirectBuffer topicNameBuffer = mqttPublishBeginEx.topic().value(); + this.topicNameHeaders = topicNameHeadersRW.build(); + + final DirectBuffer topicNameBuffer = mqttPublishBeginEx.topic().value(); final MutableDirectBuffer keyBuffer = new UnsafeBuffer(new byte[topicNameBuffer.capacity() + 4]); key = new KafkaKeyFW.Builder() .wrap(keyBuffer, 0, keyBuffer.capacity()) @@ -326,10 +335,7 @@ private void onMqttData( final MqttPublishDataExFW mqttPublishDataEx = mqttDataEx.publish(); 
kafkaHeadersRW.wrap(kafkaHeadersBuffer, 0, kafkaHeadersBuffer.capacity()); - for (OctetsFW topicHeader : topicNameHeaders) - { - addHeader(helper.kafkaFilterHeaderName, topicHeader); - } + topicNameHeaders.forEach(th -> addHeader(helper.kafkaFilterHeaderName, th)); addHeader(helper.kafkaLocalHeaderName, clientIdOctets); diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java index 1ec9e315b8..d9f2922ff9 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java @@ -200,6 +200,16 @@ public void shouldSendMultipleMessages() throws Exception k3po.finish(); } + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/publish.multiple.clients/client", + "${kafka}/publish.multiple.clients/server"}) + public void shouldSendMultipleClients() throws Exception + { + k3po.finish(); + } + @Test @Configuration("proxy.yaml") @Specification({ From 6b8ef760bb5a9769a1bd899271a253e86e6889d4 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Thu, 31 Aug 2023 15:41:27 +0200 Subject: [PATCH 069/115] Mqtt kafka will message delivery (#367) --- .../client.rpt | 16 + .../server.rpt | 17 + .../session.client.sent.reset/client.rpt | 14 + .../session.client.sent.reset/server.rpt | 15 + .../kafka/session.client.takeover/client.rpt | 46 + .../kafka/session.client.takeover/server.rpt | 47 + .../session.exists.clean.start/client.rpt | 33 + .../session.exists.clean.start/server.rpt | 34 + .../client.rpt | 14 + .../server.rpt | 15 + .../session.server.sent.reset/client.rpt | 14 + .../session.server.sent.reset/server.rpt | 15 + .../client.rpt | 14 + 
.../server.rpt | 15 + .../kafka/session.subscribe/client.rpt | 14 + .../kafka/session.subscribe/server.rpt | 16 +- .../client.rpt | 14 + .../server.rpt | 15 + .../client.rpt | 14 + .../server.rpt | 15 + .../client.rpt | 429 ++++ .../server.rpt | 421 +++ .../client.rpt | 405 +++ .../server.rpt | 400 +++ .../client.rpt | 126 + .../server.rpt | 126 + .../client.rpt | 197 ++ .../server.rpt | 199 ++ .../client.rpt | 303 +++ .../server.rpt | 307 +++ .../client.rpt | 411 +++ .../server.rpt | 411 +++ .../client.rpt | 337 +++ .../server.rpt | 336 +++ .../client.rpt | 56 + .../server.rpt | 57 + .../client.rpt | 56 + .../server.rpt | 57 + .../client.rpt | 55 + .../server.rpt | 56 + .../client.rpt | 7 + .../server.rpt | 7 + .../mqtt/session.client.takeover/client.rpt | 7 + .../mqtt/session.client.takeover/server.rpt | 6 + .../session.exists.clean.start/client.rpt | 15 + .../session.exists.clean.start/server.rpt | 12 + .../streams/mqtt/session.subscribe/client.rpt | 6 + .../streams/mqtt/session.subscribe/server.rpt | 6 + .../client.rpt | 12 + .../server.rpt | 12 + .../client.rpt | 6 + .../server.rpt | 6 + .../client.rpt | 52 + .../server.rpt | 53 + .../client.rpt | 53 + .../server.rpt | 55 + .../client.rpt | 47 + .../server.rpt | 49 + .../client.rpt | 52 + .../server.rpt | 52 + .../client.rpt | 51 + .../server.rpt | 51 + .../client.rpt | 79 + .../server.rpt | 78 + .../mqtt/session.will.message/client.rpt | 49 + .../mqtt/session.will.message/server.rpt | 49 + .../binding/mqtt/kafka/streams/KafkaIT.java | 90 + .../binding/mqtt/kafka/streams/MqttIT.java | 66 + .../mqtt/kafka/internal/InstanceId.java | 43 + .../mqtt/kafka/internal/MqttKafkaBinding.java | 5 +- .../internal/MqttKafkaBindingContext.java | 5 +- .../internal/MqttKafkaConfiguration.java | 159 +- .../config/MqttKafkaBindingConfig.java | 5 +- .../stream/MqttKafkaProxyFactory.java | 11 +- .../stream/MqttKafkaPublishFactory.java | 4 +- .../stream/MqttKafkaSessionFactory.java | 2256 +++++++++++++++-- 
.../internal/MqttKafkaConfigurationTest.java | 21 + .../stream/MqttKafkaPublishProxyIT.java | 20 + .../stream/MqttKafkaSessionProxyIT.java | 247 +- .../stream/MqttKafkaSubscribeProxyIT.java | 35 + .../binding/mqtt/internal/MqttFunctions.java | 96 +- .../main/resources/META-INF/zilla/mqtt.idl | 19 +- .../mqtt/internal/MqttFunctionsTest.java | 56 +- .../internal/stream/MqttServerFactory.java | 10 +- .../kafka/internal/KafkaFunctions.java | 32 +- .../kafka/internal/KafkaFunctionsTest.java | 3 + 86 files changed, 8956 insertions(+), 241 deletions(-) create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt create mode 100644 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt create mode 100644 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.client.takeover.deliver.will/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.client.takeover.deliver.will/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt create mode 100644 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt create mode 100644 incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/InstanceId.java diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt index 5104bd0301..d93c7548e5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt @@ -22,6 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -88,6 +89,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -100,6 +102,18 @@ write zilla:begin.ext ${kafka:beginEx() connected +write 
zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() @@ -170,6 +184,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -231,6 +246,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt index 72b69fe1d9..47e979d3ba 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -82,6 +83,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -94,6 +96,19 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +# will delivery cancellation signal for client-1 +read zilla:data.ext 
${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() @@ -156,6 +171,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -208,6 +224,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt index c9593137f9..090f55d3d1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt @@ -22,6 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -88,6 +89,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -100,6 +102,18 @@ write zilla:begin.ext ${kafka:beginEx() connected +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + 
.key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + read advised zilla:flush read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt index dc43780c91..d7e156df05 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -81,6 +82,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -93,6 +95,19 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + write advise zilla:flush write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt index 398eb8c11c..6d3087baf0 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt @@ -22,6 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -101,6 +102,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -113,6 +115,18 @@ write zilla:begin.ext ${kafka:beginEx() connected +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() @@ -166,6 +180,24 @@ write zilla:data.ext ${kafka:dataEx() write zilla:data.empty write flush +# will signal for client-1, deliver at (now + delay) +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(1000) + .instanceId("zilla-1") + .build()} +write flush + write close read closed @@ -208,6 +240,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -297,6 +330,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") 
.build() @@ -309,6 +343,18 @@ write zilla:begin.ext ${kafka:beginEx() connected +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) .merged() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt index 1c0f47f779..577dacf53d 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -101,6 +102,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -113,6 +115,19 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() @@ -167,6 +182,23 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read 
zilla:data.empty +# will signal for client-1, deliver at (now + delay) +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(1000) + .instanceId("zilla-1") + .build()} + read closed write close @@ -200,6 +232,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -288,6 +321,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -300,6 +334,19 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt index 47d321a2c2..b891f89fe2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt @@ -22,6 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() 
.merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -99,6 +100,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -111,6 +113,18 @@ write zilla:begin.ext ${kafka:beginEx() connected +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() @@ -167,6 +181,23 @@ write zilla:data.ext ${kafka:dataEx() .build()} write zilla:data.empty +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(1000) + .instanceId("zilla-1") + .build()} +write flush + write close read closed @@ -209,6 +240,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -293,6 +325,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt index 116f1da2d8..7c2cf912f4 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -99,6 +100,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -111,6 +113,19 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() @@ -168,6 +183,23 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.empty +# will signal for client-1, deliver at (now + delay) +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(1000) + .instanceId("zilla-1") + .build()} + read closed write close @@ -203,6 +235,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -290,6 +323,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") 
.topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt index 232d3145da..702b7d22dc 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt @@ -22,6 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -89,6 +90,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -101,5 +103,17 @@ write zilla:begin.ext ${kafka:beginEx() connected +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt index a8decf49d2..373d5ab9af 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -84,6 +85,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -96,6 +98,19 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + write advise zilla:flush write notify CONNACK_TRIGGERED diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt index 0f6ef624fc..010b8cde52 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt @@ -22,6 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") 
.headerNot("sender-id", "sender-1") @@ -86,6 +87,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -98,6 +100,18 @@ write zilla:begin.ext ${kafka:beginEx() connected +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + read advised zilla:flush write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt index 7e5a10c960..f6b3814dd7 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -80,6 +81,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -92,6 +94,19 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} 
+read zilla:data.null + write advise zilla:flush read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt index 974063e3b7..5e03daea66 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt @@ -22,6 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -84,6 +85,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -96,6 +98,18 @@ write zilla:begin.ext ${kafka:beginEx() connected +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + read advised zilla:flush read zilla:data.ext ${kafka:matchDataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt index 4287d0b897..4c99172db9 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -76,6 +77,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -88,6 +90,19 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + write advise zilla:flush write zilla:data.ext ${kafka:dataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt index c72a043fc6..45bdf2143f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt @@ -22,6 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") 
.headerNot("sender-id", "sender-1") @@ -86,6 +87,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -98,6 +100,18 @@ write zilla:begin.ext ${kafka:beginEx() connected +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt index 41577d3aef..f9cdaba867 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -81,6 +82,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -93,8 +95,20 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write advise zilla:flush +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + 
.build() + .build()} +read zilla:data.null +write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt index 868fe4a5c5..7d254eec73 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt @@ -22,6 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -83,6 +84,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -95,6 +97,18 @@ write zilla:begin.ext ${kafka:beginEx() connected +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt index d8a4504d4d..4e5eb2990b 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -76,6 +77,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -88,6 +90,19 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt index 55efc89d01..030545d654 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt @@ -22,6 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") 
.topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -83,6 +84,7 @@ write zilla:begin.ext ${kafka:beginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -95,6 +97,18 @@ write zilla:begin.ext ${kafka:beginEx() connected +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt index 9cf6f4f03e..2463ccf23c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt @@ -24,6 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -76,6 +77,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .merged() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") + .groupId("mqtt-clients") .filter() .key("client-1") .build() @@ -88,6 +90,19 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + 
.merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt new file mode 100644 index 0000000000..404416a76b --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt @@ -0,0 +1,429 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +read notify RECEIVED_WILL_DELIVER_AT_SIGNAL + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} 
+write flush + + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#will-signal") + .build() + .build() + .build()} + +connected + +read advised zilla:flush +read notify RECEIVED_WILL_SIGNAL_NOT_PRESENT + +write close +read closed + + +connect await RECEIVED_WILL_SIGNAL_NOT_PRESENT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_INITIAL_MIGRATE_SIGNAL + +write close +read closed + + +connect await SENT_INITIAL_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +read zilla:data.null +read notify RECEIVED_GROUP_MEMBERS_LEADER + +write abort + + +connect await RECEIVED_GROUP_MEMBERS_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext 
${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .delay(1000) + .flags("RETAIN") + .format("TEXT") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + 
.key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} + +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +write abort +read aborted + + +connect await RECEIVED_WILL_DELIVER_AT_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .filter() + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build()} + +read ${mqtt:will() + .topic("obituaries") + .delay(1000) + .flags("RETAIN") + .format("TEXT") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} +read notify RECEIVED_SESSION_WILL_MESSAGE + +write close +read closed + + + +connect await RECEIVED_SESSION_WILL_MESSAGE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("obituaries") + .header("zilla:filter", "obituaries") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "client-1 disconnected abruptly" +write flush + +write close +read 
closed + + +connect await RECEIVED_SESSION_WILL_MESSAGE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_retained") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("obituaries") + .header("zilla:filter", "obituaries") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "client-1 disconnected abruptly" +write flush + +write close +read closed + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt new file mode 100644 index 0000000000..0680352986 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt @@ -0,0 +1,421 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected +read notify WILL_STREAM_STARTED + +write await RECEIVED_WILL_CANCELLATION_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write await RECEIVED_WILL_DELIVER_LATER_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +# will signal for client-1, deliver at (now + delay) +write await RECEIVED_WILL_DELIVER_AT_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +# cleanup will message +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +read 
zilla:data.null + +# cleanup will signal +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + + +# non-clean start +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#will-signal") + .build() + .build() + .build()} + +connected + +# no will signals +write advise zilla:flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected +# receive sender-1 migrate signal + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +# send group members (leader) +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +write flush + +read aborted + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + 
.headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null +read notify RECEIVED_WILL_CANCELLATION_SIGNAL + +# will message for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +read ${mqtt:will() + .topic("obituaries") + .delay(1000) + .flags("RETAIN") + .format("TEXT") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} + +# will signal for client-1, deliver later +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +# no session state +# no migrate signals +write advise zilla:flush + +# will signal for client-1, deliver at (now + delay) +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + 
.deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +write notify RECEIVED_WILL_DELIVER_AT_SIGNAL + +read aborted +write abort + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .filter() + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build() + .build()} + +connected + +# send session will message for client-1 +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .delay(1000) + .flags("RETAIN") + .format("TEXT") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + +# deliver will message for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("obituaries") + .header("zilla:filter", "obituaries") + .header("zilla:format", "TEXT") + .build() + .build()} +read "client-1 disconnected abruptly" + +read closed +write close + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_retained") + .partition(-1, -2) + 
.ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + +# deliver will message for client-1 (retained) +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("obituaries") + .header("zilla:filter", "obituaries") + .header("zilla:format", "TEXT") + .build() + .build()} +read "client-1 disconnected abruptly" + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt new file mode 100644 index 0000000000..2435e95f5f --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt @@ -0,0 +1,405 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +read notify RECEIVED_WILL_DELIVER_AT_SIGNAL + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + 
.build()} +write flush + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#will-signal") + .build() + .build() + .build()} + +connected + +read advised zilla:flush +read notify RECEIVED_WILL_SIGNAL_NOT_PRESENT + +write close +read closed + + +connect await RECEIVED_WILL_SIGNAL_NOT_PRESENT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_INITIAL_MIGRATE_SIGNAL + +write close +read closed + + +connect await SENT_INITIAL_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +read zilla:data.null +read notify RECEIVED_GROUP_MEMBERS_LEADER + +write abort + + +connect await RECEIVED_GROUP_MEMBERS_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext 
${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .delay(1000) + .expiryInterval(15000) + .format("TEXT") + .responseTopic("responses/client1") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .correlation("info") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +# no will signals +# no session state +read advised zilla:flush + +write zilla:data.ext 
${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} + +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +write abort +read aborted + + +connect await RECEIVED_WILL_DELIVER_AT_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .filter() + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build()} + +read ${mqtt:will() + .topic("obituaries") + .delay(1000) + .expiryInterval(15000) + .format("TEXT") + .responseTopic("responses/client1") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .correlation("info") + .payload("client-1 disconnected abruptly") + .build()} +read notify RECEIVED_SESSION_WILL_MESSAGE + +write close +read closed + + + +connect await RECEIVED_SESSION_WILL_MESSAGE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("obituaries") + 
.header("zilla:filter", "obituaries") + .headerInt("zilla:timeout-ms", 15000) + .header("zilla:format", "TEXT") + .header("zilla:reply-to", "mqtt_messages") + .header("zilla:reply-key", "responses/client1") + .header("zilla:reply-filter", "responses") + .header("zilla:reply-filter", "client1") + .header("zilla:correlation-id", "info") + .build() + .build()} + +write "client-1 disconnected abruptly" +write flush + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt new file mode 100644 index 0000000000..3c650329ee --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt @@ -0,0 +1,400 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected +read notify WILL_STREAM_STARTED + +write await RECEIVED_WILL_CANCELLATION_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write await RECEIVED_WILL_DELIVER_LATER_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +# will signal for client-1, deliver at (now + delay) +write await RECEIVED_WILL_DELIVER_AT_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +# cleanup will message +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +read 
zilla:data.null + +# cleanup will signal +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + + +# non-clean start +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#will-signal") + .build() + .build() + .build()} + +connected + +# no will signals +write advise zilla:flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected +# receive sender-1 migrate signal + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +# send group members (leader) +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +write flush + +read aborted + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + 
.headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null +read notify RECEIVED_WILL_CANCELLATION_SIGNAL + +# will message for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +read ${mqtt:will() + .topic("obituaries") + .delay(1000) + .expiryInterval(15000) + .format("TEXT") + .responseTopic("responses/client1") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .correlation("info") + .payload("client-1 disconnected abruptly") + .build()} + +# will signal for client-1, deliver later +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +# no session state +# no migrate signals +write advise zilla:flush + +# will signal for client-1, deliver at (now + delay) +read zilla:data.ext 
${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +write notify RECEIVED_WILL_DELIVER_AT_SIGNAL + +read aborted +write abort + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .filter() + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build() + .build()} + +connected + +# send session will message for client-1 +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .delay(1000) + .expiryInterval(15000) + .format("TEXT") + .responseTopic("responses/client1") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .correlation("info") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + +# deliver will message for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("obituaries") + .header("zilla:filter", "obituaries") + .headerInt("zilla:timeout-ms", 15000) + .header("zilla:format", "TEXT") + .header("zilla:reply-to", "mqtt_messages") + .header("zilla:reply-key", "responses/client1") + 
.header("zilla:reply-filter", "responses") + .header("zilla:reply-filter", "client1") + .header("zilla:correlation-id", "info") + .build() + .build()} +read "client-1 disconnected abruptly" + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt new file mode 100644 index 0000000000..2b23b0a6fe --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt @@ -0,0 +1,126 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(deliverAt) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +read notify RECEIVED_WILL_DELIVER_AT_SIGNAL +read notify WAIT_1_SECOND + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + + +connect await RECEIVED_WILL_DELIVER_AT_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .filter() + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build()} + +read ${mqtt:will() + .topic("obituaries") + .delay(1000) + .format("TEXT") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} +read notify 
RECEIVED_WILL + +write close +read closed + +connect await RECEIVED_WILL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt new file mode 100644 index 0000000000..abdb9b95eb --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt @@ -0,0 +1,126 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +property delayMillis 2000L +property deliverAt ${mqtt:timestamp() + delayMillis} + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(deliverAt) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +write await WAIT_1_SECOND +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .filter() + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build() + .build()} + +connected + +# send session will message for client-1 +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .delay(1000) + .format("TEXT") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +read closed +write close + +accepted + 
+read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + +read closed +write close + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt new file mode 100644 index 0000000000..3c8a37b63a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt @@ -0,0 +1,197 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_INITIAL_MIGRATE_SIGNAL + +write close +read closed + + +connect await SENT_INITIAL_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + 
.members(1) + .build() + .build()} +read zilla:data.null +read notify RECEIVED_GROUP_MEMBERS_LEADER + +write abort + + +connect await RECEIVED_GROUP_MEMBERS_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .delay(0) + .format("TEXT") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt new file mode 100644 index 0000000000..28825c38d5 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt @@ -0,0 +1,199 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected +read notify WILL_STREAM_STARTED + +write await RECEIVED_WILL_DELIVER_LATER_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + + +# no will lifetimeId fetch due to clean-start +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + 
.typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# receive sender-1 migrate signal +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +# send group members (leader) +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +write flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# no will cancellation signal due to clean-start + +# will message for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +read ${mqtt:will() + .topic("obituaries") + .delay(0) + .format("TEXT") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} + +# will signal for client-1, deliver later +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + 
.merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +# no session state +# no migrate signals +write advise zilla:flush + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt new file mode 100644 index 0000000000..9f8bd8aab2 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt @@ -0,0 +1,303 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#will-signal") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + 
.willId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .instanceId("zilla-1") + .build()} +read notify RECEIVED_WILL_SIGNAL + +read advised zilla:flush + +write close +read closed + + +connect await RECEIVED_WILL_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_INITIAL_MIGRATE_SIGNAL + +write close +read closed + + +connect await SENT_INITIAL_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +read zilla:data.null +read notify RECEIVED_GROUP_MEMBERS_LEADER + +write close + + +connect await RECEIVED_GROUP_MEMBERS_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will 
delivery cancellation signal for client-1 +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-7ce005a0-ce9d-444d-b14b-2f302d13799d") + .hashKey("client-1") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .delay(0) + .format("TEXT") + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-7ce005a0-ce9d-444d-b14b-2f302d13799d") + .hashKey("client-1") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush 
+ +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt new file mode 100644 index 0000000000..4e75d563cd --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt @@ -0,0 +1,307 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected +read notify WILL_STREAM_STARTED + +write await RECEIVED_WILL_CANCELLATION_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write await RECEIVED_WILL_DELIVER_LATER_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +write await RECEIVED_WILL_SIGNAL_CLEANUP +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write flush + + +# non-clean start +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#will-signal") + .build() + .build() + .build()} + +connected + +# existing will signal for client-1, use lifetimeId +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + 
.build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .instanceId("zilla-1") + .build()} +write flush + +write advise zilla:flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected +# receive sender-1 migrate signal + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +# send group members (leader) +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +write flush + +read closed + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} 
+read zilla:data.null +read notify RECEIVED_WILL_CANCELLATION_SIGNAL + +# will message for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-7ce005a0-ce9d-444d-b14b-2f302d13799d") + .hashKey("client-1") + .build() + .build()} +read ${mqtt:will() + .topic("obituaries") + .delay(0) + .format("TEXT") + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} + +# will signal for client-1, deliver later +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +read notify RECEIVED_WILL_DELIVER_LATER_SIGNAL + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +# no session state +# no migrate signals +write advise zilla:flush + +# cleanup will message +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-7ce005a0-ce9d-444d-b14b-2f302d13799d") + .hashKey("client-1") + .build() + .build()} +read zilla:data.null + +# cleanup will signal +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read 
zilla:data.null + +write notify RECEIVED_WILL_SIGNAL_CLEANUP + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt new file mode 100644 index 0000000000..86ed617384 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt @@ -0,0 +1,411 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +read notify RECEIVED_WILL_DELIVER_AT_SIGNAL + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} 
+write flush + + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#will-signal") + .build() + .build() + .build()} + +connected + +read advised zilla:flush +read notify RECEIVED_WILL_SIGNAL_NOT_PRESENT + +write close +read closed + + +connect await RECEIVED_WILL_SIGNAL_NOT_PRESENT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_INITIAL_MIGRATE_SIGNAL + +write close +read closed + + +connect await SENT_INITIAL_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +read zilla:data.null +read notify RECEIVED_GROUP_MEMBERS_LEADER + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(2) + .build() + .build()} + +write close + + 
+connect await RECEIVED_GROUP_MEMBERS_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .delay(1000) + .format("TEXT") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() 
+ .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} + +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +write close +read closed + + +connect await RECEIVED_WILL_DELIVER_AT_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .filter() + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build()} + +read ${mqtt:will() + .topic("obituaries") + .delay(1000) + .format("TEXT") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} +read notify RECEIVED_SESSION_WILL_MESSAGE + +write close +read closed + + + +connect await RECEIVED_SESSION_WILL_MESSAGE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write 
zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("obituaries") + .header("zilla:filter", "obituaries") + .header("zilla:format", "TEXT") + .build() + .build()} + +write "client-1 disconnected abruptly" +write flush + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt new file mode 100644 index 0000000000..b9da4c6e7e --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt @@ -0,0 +1,411 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected +read notify WILL_STREAM_STARTED + +write await RECEIVED_WILL_CANCELLATION_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write await RECEIVED_WILL_DELIVER_LATER_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +# will signal for client-1, deliver at (now + delay) +write await RECEIVED_WILL_DELIVER_AT_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +# cleanup will message +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +read 
zilla:data.null + +# cleanup will signal +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + + +# non-clean start +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#will-signal") + .build() + .build() + .build()} + +connected + +# no will signals +write advise zilla:flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected +# receive sender-1 migrate signal + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +# send group members (leader) +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +write flush + +write await RECEIVED_WILL_DELIVER_LATER_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(2) + .build() + .build()} +write flush + +read closed + + +accepted + +read zilla:begin.ext 
${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null +read notify RECEIVED_WILL_CANCELLATION_SIGNAL + +# will message for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +read ${mqtt:will() + .topic("obituaries") + .delay(1000) + .format("TEXT") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} + +# will signal for client-1, deliver later +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +# no 
session state +# no migrate signals +write advise zilla:flush + +# migrate signal +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +# will signal for client-1, deliver at (now + delay) +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +read notify RECEIVED_WILL_DELIVER_AT_SIGNAL + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .filter() + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build() + .build()} + +connected + +# send session will message for client-1 +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .delay(1000) + .format("TEXT") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_messages") + .partition(-1, -2) + .ackMode("LEADER_ONLY") + .build() + .build()} + + +connected + +# deliver will message for client-1 +read zilla:data.ext 
${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("obituaries") + .header("zilla:filter", "obituaries") + .header("zilla:format", "TEXT") + .build() + .build()} +read "client-1 disconnected abruptly" + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/client.rpt new file mode 100644 index 0000000000..64e32ca7c3 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/client.rpt @@ -0,0 +1,337 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +read notify RECEIVED_WILL_DELIVER_AT_SIGNAL + + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#will-signal") + .build() + .build() + .build()} + +connected + +read advised zilla:flush +read notify RECEIVED_WILL_SIGNAL_NOT_PRESENT + +write close +read closed + + 
+connect await RECEIVED_WILL_SIGNAL_NOT_PRESENT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_INITIAL_MIGRATE_SIGNAL + +write close +read closed + + +connect await SENT_INITIAL_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +read zilla:data.null +read notify RECEIVED_GROUP_MEMBERS_LEADER + +write abort + + +connect await RECEIVED_GROUP_MEMBERS_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + 
.header("type", "will-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .delay(1000) + .expiryInterval(15000) + .format("TEXT") + .responseTopic("responses/client1") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .correlation("info") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} + +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +write abort +read aborted + + +connect await RECEIVED_WILL_DELIVER_AT_SIGNAL + "zilla://streams/kafka0" + option zilla:window 
8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .filter() + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build()} + +read ${mqtt:will() + .topic("obituaries") + .delay(1000) + .expiryInterval(15000) + .format("TEXT") + .responseTopic("responses/client1") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("different willId") + .correlation("info") + .payload("client-1 disconnected abruptly") + .build()} + +write close +read closed + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/server.rpt new file mode 100644 index 0000000000..901b37f204 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/server.rpt @@ -0,0 +1,336 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected +read notify WILL_STREAM_STARTED + +write await RECEIVED_WILL_CANCELLATION_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write await RECEIVED_WILL_DELIVER_LATER_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + +# will signal for client-1, deliver at (now + delay); the stored will message carries a different willId +write await RECEIVED_WILL_DELIVER_AT_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} +write flush + + +# non-clean start +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + 
.topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#will-signal") + .build() + .build() + .build()} + +connected + +# no will signals +write advise zilla:flush + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected +# receive sender-1 migrate signal + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +# send group members (leader) +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +write flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null +read notify RECEIVED_WILL_CANCELLATION_SIGNAL + +# will message for client-1 
+read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .hashKey("client-1") + .build() + .build()} +read ${mqtt:will() + .topic("obituaries") + .delay(1000) + .expiryInterval(15000) + .format("TEXT") + .responseTopic("responses/client1") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .correlation("info") + .payload("client-1 disconnected abruptly") + .build()} + +# will signal for client-1, deliver later +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL + +read advised zilla:flush ${kafka:matchFlushEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +# no session state +# no migrate signals +write advise zilla:flush + +# will signal for client-1, deliver at (now + delay) +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .build() + .build()} +read ${mqtt:willSignal() + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .instanceId("zilla-1") + .build()} + +write notify RECEIVED_WILL_DELIVER_AT_SIGNAL + +read aborted +write 
abort + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_sessions") + .filter() + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build() + .build()} + +connected + +# send session will message for client-1 +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .delay(1000) + .expiryInterval(15000) + .format("TEXT") + .responseTopic("responses/client1") + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("different willId") + .correlation("info") + .payload("client-1 disconnected abruptly") + .build()} +write flush +# willId != willId from the will-signal so we don't send will message + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt new file mode 100644 index 0000000000..104337d267 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt @@ -0,0 +1,56 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +read aborted +read notify RECEIVED_ABORT +write abort + +connect await RECEIVED_ABORT + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt new file mode 100644 index 0000000000..5e8609f387 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt @@ -0,0 +1,57 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +write abort +read aborted + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt new file mode 100644 index 0000000000..2b4698bafc --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt @@ -0,0 +1,56 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +read closed +read notify RECEIVED_CLOSE +write close + +connect await RECEIVED_CLOSE + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt new file mode 100644 index 0000000000..3bfdcf16c4 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt @@ -0,0 +1,57 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +write close +read closed + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt new file mode 100644 index 0000000000..f2c9e2f9d1 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt @@ -0,0 +1,55 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +write aborted +read notify SENT_RESET + +connect await SENT_RESET + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt new file mode 100644 index 0000000000..d637b63167 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt @@ -0,0 +1,56 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + +read abort + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .build() + .build()} + +connected + + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt index 58c446c456..1d83b99862 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt @@ -29,6 +29,13 @@ connected read zilla:data.empty +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt index 1505e82923..3aa61ab5c0 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt @@ -33,6 +33,13 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + read ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt index 5e04d0a12f..047113b2db 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt @@ -27,6 +27,13 @@ write zilla:begin.ext ${mqtt:beginEx() connected +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt index d4e46af36c..756f369c1c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt @@ -32,6 +32,12 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} read ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt index d5d7c03eec..6fe5bec3e8 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt @@ -13,6 +13,7 @@ # specific language governing permissions and limitations under the License. 
# + connect "zilla://streams/mqtt0" option zilla:window 8192 option zilla:transmission "duplex" @@ -31,6 +32,13 @@ read zilla:data.empty write notify READ_SESSION_EMPTY +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + write ${mqtt:session() .subscription("sensor/one", 1) .build()} @@ -73,6 +81,7 @@ connect await SESSION_READY write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() + .flags("CLEAN_START") .expiry(1) .clientId("client-1") .build() @@ -84,4 +93,10 @@ read ${mqtt:session() .subscription("sensor/one", 1) .build()} +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} write zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt index f79622e0db..6475a7af1f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt @@ -32,6 +32,12 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} read ${mqtt:session() .subscription("sensor/one", 1) .build()} @@ -76,4 +82,10 @@ write ${mqtt:session() .build()} write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} read zilla:data.empty diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt index 1919464e2e..d5fa2ae743 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt @@ -31,6 +31,12 @@ read zilla:data.empty write notify READ_EMPTY_STATE +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} write ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt index dc487358ef..4e745cf720 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt @@ -32,6 +32,12 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} read ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt index 1d9b28e1ab..a3e5b43a95 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt @@ -31,6 +31,12 @@ read zilla:data.empty write notify READ_EMPTY_STATE +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} write ${mqtt:session() .subscription("sensor/one", 1) .build()} @@ -39,6 +45,12 @@ read ${mqtt:session() .subscription("sensor/one", 1) .build()} +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} write ${mqtt:session() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt index 9a66280e9e..a5a01e3656 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt @@ -33,6 +33,12 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} read ${mqtt:session() .subscription("sensor/one", 1) .build()} @@ -42,6 +48,12 @@ write ${mqtt:session() .build()} write flush +read 
zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} read ${mqtt:session() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt index 936ce63059..12f50776ef 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt @@ -30,6 +30,12 @@ connected read zilla:data.empty +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} write ${mqtt:session() .subscription("sensor/one", 1) .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt index 0d80c4b9a7..cf6802be63 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt @@ -33,6 +33,12 @@ connected write zilla:data.empty write flush +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} read ${mqtt:session() .subscription("sensor/one", 1) .build()} 
diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt new file mode 100644 index 0000000000..668d5303aa --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt @@ -0,0 +1,52 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect await WILL_STREAM_STARTED + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +write ${mqtt:will() + .topic("obituaries") + .delay(1) + .flags("RETAIN") + .format("TEXT") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +read zilla:data.empty + +write abort +read aborted + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt new file mode 100644 index 0000000000..a9513c2214 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt @@ -0,0 +1,53 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +read ${mqtt:will() + .topic("obituaries") + .delay(1) + .flags("RETAIN") + .format("TEXT") + .payload("client-1 disconnected abruptly") + .build()} + +write zilla:data.empty +write flush + +read aborted +write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt new file mode 100644 index 0000000000..8c8e901c5e --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt @@ -0,0 +1,53 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect await WILL_STREAM_STARTED + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +write ${mqtt:will() + .topic("obituaries") + .delay(1) + .expiryInterval(15) + .format("TEXT") + .responseTopic("responses/client1") + .correlation("info") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +read zilla:data.empty + +write abort +read aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt new file mode 100644 index 0000000000..d2b66cf1c7 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt @@ -0,0 +1,55 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +read ${mqtt:will() + .topic("obituaries") + .delay(1) + .expiryInterval(15) + .format("TEXT") + .responseTopic("responses/client1") + .correlation("info") + .payload("client-1 disconnected abruptly") + .build()} + +write zilla:data.empty +write flush + +read aborted +write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt new file mode 100644 index 0000000000..09ad926815 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt @@ -0,0 +1,47 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect await WILL_STREAM_STARTED + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL", "CLEAN_START") + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .format("TEXT") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +read zilla:data.empty + +write zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt new file mode 100644 index 0000000000..f8ab41ce05 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt @@ -0,0 +1,49 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL", "CLEAN_START") + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} +read ${mqtt:will() + .topic("obituaries") + .format("TEXT") + .payload("client-1 disconnected abruptly") + .build()} + +write zilla:data.empty +write flush + +read zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.client.takeover.deliver.will/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.client.takeover.deliver.will/client.rpt new file mode 100644 index 0000000000..ce3007b13a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.client.takeover.deliver.will/client.rpt @@ -0,0 +1,52 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect await WILL_STREAM_STARTED + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +write ${mqtt:will() + .topic("obituaries") + .delay(1) + .format("TEXT") + .payload("client-1 disconnected abruptly") + .build()} +write flush + + +# We only expect this after the will-signal and will message were saved to Kafka +read zilla:data.empty + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.client.takeover.deliver.will/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.client.takeover.deliver.will/server.rpt new file mode 100644 index 0000000000..ed9cae3310 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.client.takeover.deliver.will/server.rpt @@ -0,0 +1,52 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License.
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +read ${mqtt:will() + .topic("obituaries") + .delay(1) + .format("TEXT") + .payload("client-1 disconnected abruptly") + .build()} + +write zilla:data.empty +write flush + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt new file mode 100644 index 0000000000..490bc13a57 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt @@ -0,0 +1,51 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect await WILL_STREAM_STARTED + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +write ${mqtt:will() + .topic("obituaries") + .format("TEXT") + .payload("client-1 disconnected abruptly") + .build()} +write flush + + +# We only expect this after the will-signal and will message were saved to Kafka +read zilla:data.empty + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt new file mode 100644 index 0000000000..c2fae0e893 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt @@ -0,0 +1,51 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License.
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +read ${mqtt:will() + .topic("obituaries") + .format("TEXT") + .payload("client-1 disconnected abruptly") + .build()} + +write zilla:data.empty +write flush + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt new file mode 100644 index 0000000000..f58852f608 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt @@ -0,0 +1,79 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("one") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .format("TEXT") + .payload("client-1 disconnected abruptly") + .build()} +write flush + +read zilla:data.empty +read notify RECEIVED_CONNACK_TRIGGER + +read closed +write close + +connect await RECEIVED_CONNACK_TRIGGER + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("one") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} +write ${mqtt:will() + .topic("obituaries") + .format("TEXT") + .payload("client-1 disappeared abruptly") + .build()} +write flush + +read zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt new file mode 100644 index 0000000000..f26b19e7d5 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt @@ -0,0 +1,78 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("one") + .build() + .build()} + +connected + + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} +read ${mqtt:will() + .topic("obituaries") + .format("TEXT") + .payload("client-1 disconnected abruptly") + .build()} + +write zilla:data.empty +write flush + +write close +read closed + + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("one") + .build() + .build()} +connected + + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} +read ${mqtt:will() + .topic("obituaries") + .format("TEXT") + .payload("client-1 disappeared abruptly") + .build()} + +write zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt new file mode 100644 index 0000000000..88290a7fc1 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt @@ -0,0 +1,49 @@ +# +# Copyright 2021-2023 
Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect await WILL_STREAM_STARTED + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +write ${mqtt:will() + .topic("obituaries") + .delay(1) + .format("TEXT") + .payload("client-1 disconnected abruptly") + .build()} +write flush + + +#We only expect this, after the will-signal and will message was saved to Kafka +read zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt new file mode 100644 index 0000000000..4f102e37ef --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt @@ -0,0 +1,49 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL") + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("WILL") + .build() + .build()} + +read ${mqtt:will() + .topic("obituaries") + .delay(1) + .format("TEXT") + .payload("client-1 disconnected abruptly") + .build()} + +write zilla:data.empty +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java index c9119665fb..ff22cf0721 100644 --- a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java +++ b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java @@ -574,4 +574,94 @@ public void shouldGroupStreamReceiveServerSentReset() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${kafka}/session.will.message.abort.deliver.will/client", + "${kafka}/session.will.message.abort.deliver.will/server"}) + public void shouldSendWillMessageOnAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.will.message.normal.disconnect/client", + 
"${kafka}/session.will.message.normal.disconnect/server"}) + public void shouldNotSendWillMessageOnNormalDisconnect() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.will.message.abort.deliver.will.retain/client", + "${kafka}/session.will.message.abort.deliver.will.retain/server"}) + public void shouldSaveWillMessageAsRetain() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.will.message.clean.start/client", + "${kafka}/session.will.message.clean.start/server"}) + public void shouldGenerateLifeTimeIdOnCleanStart() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.will.message.will.id.mismatch.no.deliver/client", + "${kafka}/session.will.message.will.id.mismatch.no.deliver/server"}) + public void shouldNotSendWillMessageOnWillIdMismatch() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.will.message.takeover.deliver.will/client", + "${kafka}/session.will.message.takeover.deliver.will/server"}) + public void shouldSendWillMessageOnSessionTakeover() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.will.message.cancel.delivery/client", + "${kafka}/session.will.message.cancel.delivery/server"}) + public void shouldCancelWillDelivery() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.will.stream.end.reconnect/client", + "${kafka}/session.will.stream.end.reconnect/server"}) + public void shouldReconnectWillStreamOnKafkaEnd() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.will.stream.abort.reconnect/client", + "${kafka}/session.will.stream.abort.reconnect/server"}) + public void shouldReconnectWillStreamOnKafkaAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.will.stream.reset.reconnect/client", + 
"${kafka}/session.will.stream.reset.reconnect/server"}) + public void shouldReconnectWillStreamOnKafkaReset() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java index 5e5c07f6ff..87a06466ac 100644 --- a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java +++ b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java @@ -493,4 +493,70 @@ public void shouldSessionStreamReceiveServerSentReset() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${mqtt}/session.will.message.abort.deliver.will/client", + "${mqtt}/session.will.message.abort.deliver.will/server"}) + public void shouldSendWillMessageOnAbort() throws Exception + { + k3po.start(); + k3po.notifyBarrier("WILL_STREAM_STARTED"); + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.will.message.normal.disconnect/client", + "${mqtt}/session.will.message.normal.disconnect/server"}) + public void shouldNotSendWillMessageOnNormalDisconnect() throws Exception + { + k3po.start(); + k3po.notifyBarrier("WILL_STREAM_STARTED"); + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.will.message.abort.deliver.will.retain/client", + "${mqtt}/session.will.message.abort.deliver.will.retain/server"}) + public void shouldSaveWillMessageAsRetain() throws Exception + { + k3po.start(); + k3po.notifyBarrier("WILL_STREAM_STARTED"); + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.will.message.clean.start/client", + "${mqtt}/session.will.message.clean.start/server"}) + public void shouldSendWillMessageOnClientReconnectCleanStart() throws Exception + { + k3po.start(); + k3po.notifyBarrier("WILL_STREAM_STARTED"); + k3po.finish(); + } 
+ + @Test + @Specification({ + "${mqtt}/session.will.message.client.takeover.deliver.will/client", + "${mqtt}/session.will.message.client.takeover.deliver.will/server"}) + public void shouldSendWillMessageOnAbortClientTakeover() throws Exception + { + k3po.start(); + k3po.notifyBarrier("WILL_STREAM_STARTED"); + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.will.message/client", + "${mqtt}/session.will.message/server"}) + public void shouldSaveWillMessage() throws Exception + { + k3po.start(); + k3po.notifyBarrier("WILL_STREAM_STARTED"); + k3po.finish(); + } } diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/InstanceId.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/InstanceId.java new file mode 100644 index 0000000000..fcb5bc86ca --- /dev/null +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/InstanceId.java @@ -0,0 +1,43 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal; + +import java.util.function.Supplier; + +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; + + +public class InstanceId +{ + private final Supplier supplyInstanceId; + private volatile String16FW instanceId; + + InstanceId( + Supplier supplyInstanceId) + { + this.supplyInstanceId = supplyInstanceId; + regenerate(); + } + + public void regenerate() + { + instanceId = new String16FW(supplyInstanceId.get()); + } + + public String16FW instanceId() + { + return instanceId; + } +} diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBinding.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBinding.java index 1bddbf2258..e6c756481c 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBinding.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBinding.java @@ -24,10 +24,12 @@ public class MqttKafkaBinding implements Binding public static final String NAME = "mqtt-kafka"; private final MqttKafkaConfiguration config; + private final InstanceId instanceId; MqttKafkaBinding( MqttKafkaConfiguration config) { + this.instanceId = new InstanceId(config.instanceId()); this.config = config; } @@ -47,6 +49,7 @@ public URL type() public MqttKafkaBindingContext supply( EngineContext context) { - return new MqttKafkaBindingContext(config, context); + return new MqttKafkaBindingContext(config, context, instanceId); } + } diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingContext.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingContext.java index 3a613eff0c..6ea46f8dc3 100644 --- 
a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingContext.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingContext.java @@ -33,9 +33,10 @@ public class MqttKafkaBindingContext implements BindingContext MqttKafkaBindingContext( MqttKafkaConfiguration config, - EngineContext context) + EngineContext context, + InstanceId instanceId) { - this.factories = singletonMap(PROXY, new MqttKafkaProxyFactory(config, context)); + this.factories = singletonMap(PROXY, new MqttKafkaProxyFactory(config, context, instanceId)); } @Override diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java index 440919173d..f04b2a3b27 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java @@ -14,8 +14,13 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal; -import java.lang.reflect.Method; +import static java.time.Instant.now; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; import java.util.UUID; +import java.util.function.LongSupplier; import java.util.function.Supplier; import org.agrona.LangUtil; @@ -24,12 +29,19 @@ public class MqttKafkaConfiguration extends Configuration { + public static final String MQTT_CLIENTS_GROUP_ID = "mqtt-clients"; private static final ConfigurationDef MQTT_KAFKA_CONFIG; public static final PropertyDef MESSAGES_TOPIC; public static final PropertyDef RETAINED_MESSAGES_TOPIC; public static final PropertyDef SESSIONS_TOPIC; - 
public static final PropertyDef SESSION_ID; + public static final PropertyDef SESSION_ID; + public static final PropertyDef WILL_ID; + public static final PropertyDef LIFETIME_ID; + public static final PropertyDef INSTANCE_ID; + public static final PropertyDef TIME; + public static final BooleanPropertyDef WILL_AVAILABLE; + public static final IntPropertyDef WILL_STREAM_RECONNECT_DELAY; static { @@ -37,8 +49,18 @@ public class MqttKafkaConfiguration extends Configuration MESSAGES_TOPIC = config.property("messages.topic", "mqtt_messages"); RETAINED_MESSAGES_TOPIC = config.property("retained.messages.topic", "mqtt_retained"); SESSIONS_TOPIC = config.property("sessions.topic", "mqtt_sessions"); - SESSION_ID = config.property(SessionIdSupplier.class, "session.id", - MqttKafkaConfiguration::decodeSessionIdSupplier, MqttKafkaConfiguration::defaultSessionIdSupplier); + SESSION_ID = config.property(StringSupplier.class, "session.id", + MqttKafkaConfiguration::decodeStringSupplier, MqttKafkaConfiguration::defaultSessionId); + WILL_ID = config.property(StringSupplier.class, "will.id", + MqttKafkaConfiguration::decodeStringSupplier, MqttKafkaConfiguration::defaultWillId); + LIFETIME_ID = config.property(StringSupplier.class, "lifetime.id", + MqttKafkaConfiguration::decodeStringSupplier, MqttKafkaConfiguration::defaultLifetimeId); + INSTANCE_ID = config.property(StringSupplier.class, "instance.id", + MqttKafkaConfiguration::decodeStringSupplier, MqttKafkaConfiguration::defaultInstanceId); + TIME = config.property(LongSupplier.class, "time", + MqttKafkaConfiguration::decodeLongSupplier, MqttKafkaConfiguration::defaultTime); + WILL_AVAILABLE = config.property("will.available", true); + WILL_STREAM_RECONNECT_DELAY = config.property("will.stream.reconnect", 2); MQTT_KAFKA_CONFIG = config; } @@ -48,59 +70,138 @@ public MqttKafkaConfiguration( super(MQTT_KAFKA_CONFIG, config); } - public Supplier sessionIdSupplier() + @FunctionalInterface + public interface StringSupplier extends 
Supplier + { + } + + public Supplier sessionId() { return SESSION_ID.get(this); } - @FunctionalInterface - public interface SessionIdSupplier extends Supplier + public Supplier willId() + { + return WILL_ID.get(this); + } + + public Supplier lifetimeId() { + return LIFETIME_ID.get(this); } - private static SessionIdSupplier decodeSessionIdSupplier( - Configuration config, - String value) + public Supplier instanceId() { + return INSTANCE_ID.get(this); + } + + public LongSupplier time() + { + return TIME.get(this); + } + + public boolean willAvailable() + { + return WILL_AVAILABLE.get(this); + } + + public int willStreamReconnectDelay() + { + return WILL_STREAM_RECONNECT_DELAY.getAsInt(this); + } + + private static StringSupplier decodeStringSupplier( + String fullyQualifiedMethodName) + { + StringSupplier supplier = null; + try { - String className = value.substring(0, value.indexOf("$$Lambda")); - Class lambdaClass = Class.forName(className); - - Method targetMethod = null; - for (Method method : lambdaClass.getDeclaredMethods()) + MethodType signature = MethodType.methodType(String.class); + String[] parts = fullyQualifiedMethodName.split("::"); + Class ownerClass = Class.forName(parts[0]); + String methodName = parts[1]; + MethodHandle method = MethodHandles.publicLookup().findStatic(ownerClass, methodName, signature); + supplier = () -> { - if (method.isSynthetic()) + String value = null; + try { - targetMethod = method; - break; + value = (String) method.invoke(); } - } + catch (Throwable ex) + { + LangUtil.rethrowUnchecked(ex); + } + + return value; + }; + } + catch (Throwable ex) + { + LangUtil.rethrowUnchecked(ex); + } + + return supplier; + } + + private static LongSupplier decodeLongSupplier( + String fullyQualifiedMethodName) + { + LongSupplier supplier = null; - Method finalTargetMethod = targetMethod; - return () -> + try + { + MethodType signature = MethodType.methodType(long.class); + String[] parts = fullyQualifiedMethodName.split("::"); + Class 
ownerClass = Class.forName(parts[0]); + String methodName = parts[1]; + MethodHandle method = MethodHandles.publicLookup().findStatic(ownerClass, methodName, signature); + supplier = () -> { + long value = 0; try { - finalTargetMethod.setAccessible(true); - return (String) finalTargetMethod.invoke(null); + value = (long) method.invoke(); } - catch (Exception e) + catch (Throwable ex) { - throw new RuntimeException("Failed to invoke the lambda method.", e); + LangUtil.rethrowUnchecked(ex); } + + return value; }; } catch (Throwable ex) { LangUtil.rethrowUnchecked(ex); } - return null; + + return supplier; } - private static SessionIdSupplier defaultSessionIdSupplier( - Configuration config) + private static String defaultInstanceId() + { + return String.format("%s-%s", "zilla", UUID.randomUUID()); + } + + private static String defaultSessionId() + { + return String.format("%s-%s", "zilla", UUID.randomUUID()); + } + + private static String defaultWillId() + { + return String.format("%s", UUID.randomUUID()); + } + + private static String defaultLifetimeId() + { + return String.format("%s", UUID.randomUUID()); + } + + private static long defaultTime() { - return () -> String.format("%s-%s", "zilla", UUID.randomUUID()); + return now().toEpochMilli(); } } diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java index 0e5ad114f9..165d247fec 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java @@ -19,6 +19,7 @@ import java.util.List; import java.util.Optional; +import 
io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionFactory; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; @@ -26,16 +27,16 @@ public class MqttKafkaBindingConfig { public final long id; - public final String entry; public final KindConfig kind; public final MqttKafkaOptionsConfig options; public final List routes; + public MqttKafkaSessionFactory.KafkaWillProxy willProxy; + public MqttKafkaBindingConfig( BindingConfig binding) { this.id = binding.id; - this.entry = binding.entry; this.kind = binding.kind; this.options = Optional.ofNullable(binding.options) .map(MqttKafkaOptionsConfig.class::cast) diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java index fb094cee35..08e8367cbf 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java @@ -14,10 +14,13 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.WILL_STREAM_RECONNECT_DELAY; + import org.agrona.DirectBuffer; import org.agrona.collections.Int2ObjectHashMap; import org.agrona.collections.Long2ObjectHashMap; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.InstanceId; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaBindingConfig; import 
io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.OctetsFW; @@ -42,7 +45,8 @@ public class MqttKafkaProxyFactory implements MqttKafkaStreamFactory public MqttKafkaProxyFactory( MqttKafkaConfiguration config, - EngineContext context) + EngineContext context, + InstanceId instanceId) { final Long2ObjectHashMap bindings = new Long2ObjectHashMap<>(); final Int2ObjectHashMap factories = new Int2ObjectHashMap<>(); @@ -54,7 +58,7 @@ public MqttKafkaProxyFactory( config, context, bindings::get); final MqttKafkaSessionFactory sessionFactory = new MqttKafkaSessionFactory( - config, context, bindings::get); + config, context, instanceId, bindings::get, WILL_STREAM_RECONNECT_DELAY); factories.put(MqttBeginExFW.KIND_PUBLISH, publishFactory); factories.put(MqttBeginExFW.KIND_SUBSCRIBE, subscribeFactory); @@ -79,9 +83,8 @@ public void attach( public void detach( long bindingId) { - bindings.remove(bindingId); - factories.values().forEach(streamFactory -> streamFactory.onDetached(bindingId)); + bindings.remove(bindingId); } @Override diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java index 0499a5b37e..c8a34e2025 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java @@ -352,7 +352,7 @@ private void onMqttData( }); } - if (mqttPublishDataEx.contentType().asString() != null) + if (mqttPublishDataEx.contentType().length() != -1) { addHeader(helper.kafkaContentTypeHeaderName, mqttPublishDataEx.contentType()); } @@ -362,7 +362,7 @@ private void onMqttData( addHeader(helper.kafkaFormatHeaderName, mqttPublishDataEx.format()); 
} - if (mqttPublishDataEx.responseTopic().asString() != null) + if (mqttPublishDataEx.responseTopic().length() != -1) { final String16FW responseTopic = mqttPublishDataEx.responseTopic(); addHeader(helper.kafkaReplyToHeaderName, kafkaMessagesTopic); diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java index dc11a6bb44..d0f027f3d2 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java @@ -14,27 +14,54 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.MQTT_CLIENTS_GROUP_ID; +import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT; +import static io.aklivity.zilla.runtime.engine.concurrent.Signaler.NO_CANCEL_ID; +import static java.lang.System.currentTimeMillis; import static java.time.Instant.now; import static java.util.concurrent.TimeUnit.SECONDS; +import static org.agrona.BitUtil.SIZE_OF_INT; +import static org.agrona.BitUtil.SIZE_OF_LONG; +import java.nio.ByteOrder; +import java.nio.charset.StandardCharsets; +import java.util.Optional; +import java.util.concurrent.TimeUnit; import java.util.function.LongFunction; +import java.util.function.LongSupplier; import java.util.function.LongUnaryOperator; import java.util.function.Supplier; import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; +import org.agrona.collections.IntHashSet; import org.agrona.collections.Long2ObjectHashMap; +import org.agrona.collections.LongArrayList; +import 
org.agrona.collections.Object2ObjectHashMap; import org.agrona.concurrent.UnsafeBuffer; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.InstanceId; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaHeaderHelper; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.config.MqttKafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaAckMode; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaCapabilities; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaEvaluation; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaHeaderFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaKeyFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaOffsetFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaOffsetType; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPayloadFormat; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPayloadFormatFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPublishFlags; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttSessionFlags; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttSessionStateFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttWillDeliverAt; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttWillMessageFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttWillSignalFW; import 
io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.AbortFW; @@ -45,25 +72,42 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.FlushFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaGroupDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaMergedDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaMergedFlushExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttBeginExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSessionBeginExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSessionDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.SignalFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.Configuration.IntPropertyDef; import io.aklivity.zilla.runtime.engine.EngineContext; import io.aklivity.zilla.runtime.engine.binding.BindingHandler; import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; +import io.aklivity.zilla.runtime.engine.concurrent.Signaler; public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory { + private static final byte 
SLASH_BYTE = (byte) '/'; private static final KafkaAckMode KAFKA_DEFAULT_ACK_MODE = KafkaAckMode.LEADER_ONLY; private static final String KAFKA_TYPE_NAME = "kafka"; + private static final String MQTT_TYPE_NAME = "mqtt"; private static final String MIGRATE_KEY_POSTFIX = "#migrate"; + private static final String WILL_SIGNAL_KEY_POSTFIX = "#will-signal"; + private static final String WILL_KEY_POSTFIX = "#will-"; private static final String GROUP_PROTOCOL = "highlander"; private static final String16FW SENDER_ID_NAME = new String16FW("sender-id"); + private static final String16FW TYPE_HEADER_NAME = new String16FW("type"); + private static final String16FW WILL_SIGNAL_NAME = new String16FW("will-signal"); private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0); private static final int DATA_FLAG_COMPLETE = 0x03; + private static final int SIGNAL_DELIVER_WILL_MESSAGE = 1; + private static final int SIGNAL_CONNECT_WILL_STREAM = 2; + private static final int SIZE_OF_UUID = 38; private final BeginFW beginRO = new BeginFW(); private final DataFW dataRO = new DataFW(); @@ -76,9 +120,14 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final EndFW.Builder endRW = new EndFW.Builder(); private final AbortFW.Builder abortRW = new AbortFW.Builder(); private final FlushFW.Builder flushRW = new FlushFW.Builder(); + private final MqttWillMessageFW.Builder mqttMessageRW = new MqttWillMessageFW.Builder(); + private final MqttWillSignalFW.Builder mqttWillSignalRW = new MqttWillSignalFW.Builder(); + private final Array32FW.Builder kafkaHeadersRW = + new Array32FW.Builder<>(new KafkaHeaderFW.Builder(), new KafkaHeaderFW()); private final WindowFW windowRO = new WindowFW(); private final ResetFW resetRO = new ResetFW(); + private final SignalFW signalRO = new SignalFW(); private final WindowFW.Builder windowRW = new WindowFW.Builder(); private final ResetFW.Builder resetRW = new ResetFW.Builder(); @@ -86,33 
+135,80 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final ExtensionFW extensionRO = new ExtensionFW(); private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); private final MqttSessionStateFW mqttSessionStateRO = new MqttSessionStateFW(); + private final MqttWillSignalFW mqttWillSignalRO = new MqttWillSignalFW(); + private final MqttWillMessageFW mqttWillRO = new MqttWillMessageFW(); + private final MqttDataExFW mqttDataExRO = new MqttDataExFW(); private final KafkaDataExFW kafkaDataExRO = new KafkaDataExFW(); + private final KafkaFlushExFW kafkaFlushExRO = new KafkaFlushExFW(); private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); + private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder(); + private final String16FW binaryFormat = new String16FW(MqttPayloadFormat.BINARY.name()); + private final String16FW textFormat = new String16FW(MqttPayloadFormat.TEXT.name()); + private final MutableDirectBuffer writeBuffer; private final MutableDirectBuffer extBuffer; + private final MutableDirectBuffer kafkaHeadersBuffer; + private final MutableDirectBuffer willMessageBuffer; + private final MutableDirectBuffer willSignalBuffer; + private final MutableDirectBuffer willKeyBuffer; + private final MutableDirectBuffer willSignalKeyBuffer; + private final BufferPool bufferPool; private final BindingHandler streamFactory; + private final Signaler signaler; private final LongUnaryOperator supplyInitialId; private final LongUnaryOperator supplyReplyId; private final int kafkaTypeId; + private final int mqttTypeId; private final LongFunction supplyBinding; private final Supplier supplySessionId; + private final Supplier supplyWillId; + private final Supplier supplyLifetimeId; + private final LongSupplier supplyTime; private final Long2ObjectHashMap sessionIds; + private final MqttKafkaHeaderHelper 
helper; + private final int coreIndex; + private final Supplier supplyTraceId; + private final Object2ObjectHashMap willDeliverIds; + private final InstanceId instanceId; + private final boolean willAvailable; + private final int reconnectDelay; + private int reconnectAttempt; public MqttKafkaSessionFactory( MqttKafkaConfiguration config, EngineContext context, - LongFunction supplyBinding) + InstanceId instanceId, + LongFunction supplyBinding, + IntPropertyDef reconnectDelay) { this.kafkaTypeId = context.supplyTypeId(KAFKA_TYPE_NAME); + this.mqttTypeId = context.supplyTypeId(MQTT_TYPE_NAME); this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.kafkaHeadersBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.willMessageBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.willSignalBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.willKeyBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.willSignalKeyBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.bufferPool = context.bufferPool(); + this.helper = new MqttKafkaHeaderHelper(); this.streamFactory = context.streamFactory(); + this.signaler = context.signaler(); this.supplyInitialId = context::supplyInitialId; this.supplyReplyId = context::supplyReplyId; this.supplyBinding = supplyBinding; - this.supplySessionId = config.sessionIdSupplier(); + this.supplySessionId = config.sessionId(); + this.supplyWillId = config.willId(); + this.supplyLifetimeId = config.lifetimeId(); + this.supplyTime = config.time(); + this.supplyTraceId = context::supplyTraceId; this.sessionIds = new Long2ObjectHashMap<>(); + this.coreIndex = context.index(); + this.willAvailable = config.willAvailable(); + this.willDeliverIds = new Object2ObjectHashMap<>(); + this.instanceId = instanceId; + 
this.reconnectDelay = reconnectDelay.getAsInt(config); } @Override @@ -150,6 +246,16 @@ public MessageConsumer newStream( public void onAttached( long bindingId) { + MqttKafkaBindingConfig binding = supplyBinding.apply(bindingId); + if (willAvailable && coreIndex == 0) + { + Optional route = binding.routes.stream().findFirst(); + final long routeId = route.map(mqttKafkaRouteConfig -> mqttKafkaRouteConfig.id).orElse(0L); + + binding.willProxy = new KafkaWillProxy(binding.id, routeId, + binding.sessionsTopic(), binding.messagesTopic(), binding.retainedTopic()); + binding.willProxy.doKafkaBegin(currentTimeMillis()); + } sessionIds.put(bindingId, supplySessionId.get()); } @@ -157,7 +263,14 @@ public void onAttached( public void onDetached( long bindingId) { + MqttKafkaBindingConfig binding = supplyBinding.apply(bindingId); sessionIds.remove(bindingId); + + if (binding.willProxy != null) + { + binding.willProxy.doKafkaEnd(supplyTraceId.get(), 0); + binding.willProxy = null; + } } private final class MqttSessionProxy @@ -167,12 +280,11 @@ private final class MqttSessionProxy private final long routedId; private final long initialId; private final long replyId; - private final KafkaGroupProxy group; private final String16FW sessionId; private final String16FW sessionsTopic; - - + private String lifetimeId; private KafkaSessionProxy session; + private KafkaGroupProxy group; private int state; private long initialSeq; @@ -187,6 +299,10 @@ private final class MqttSessionProxy private String16FW clientId; private String16FW clientIdMigrate; private int sessionExpiryMillis; + private int sessionFlags; + private int willPadding; + private String willId; + private int delay; private MqttSessionProxy( MessageConsumer mqtt, @@ -202,8 +318,7 @@ private MqttSessionProxy( this.routedId = routedId; this.initialId = initialId; this.replyId = supplyReplyId.applyAsLong(initialId); - this.session = new KafkaSessionSignalProxy(originId, resolvedId, this); - this.group = new 
KafkaGroupProxy(originId, resolvedId, this); + this.session = new KafkaFetchWillSignalProxy(originId, resolvedId, this); this.sessionsTopic = sessionsTopic; this.sessionId = new String16FW(sessionIds.get(bindingId)); } @@ -274,8 +389,21 @@ private void onMqttBegin( final int sessionExpiry = mqttSessionBeginEx.expiry(); sessionExpiryMillis = mqttSessionBeginEx.expiry() == 0 ? Integer.MAX_VALUE : (int) SECONDS.toMillis(sessionExpiry); - session.doKafkaBeginIfNecessary(traceId, authorization, affinity, null, clientIdMigrate, sessionId); - group.doKafkaBegin(traceId, authorization, affinity); + sessionFlags = mqttSessionBeginEx.flags(); + + if (!isSetWillFlag(sessionFlags) || isSetCleanStart(sessionFlags)) + { + final long routedId = session.routedId; + session = new KafkaSessionSignalProxy(originId, routedId, this); + } + if (isSetWillFlag(sessionFlags)) + { + final int willSignalSize = clientId.sizeof() + SIZE_OF_INT + SIZE_OF_LONG + SIZE_OF_UUID + SIZE_OF_UUID + + instanceId.instanceId().sizeof(); + willPadding = willSignalSize + SIZE_OF_UUID + SIZE_OF_UUID; + } + + session.doKafkaBeginIfNecessary(traceId, authorization, affinity); } private void onMqttData( @@ -286,8 +414,9 @@ private void onMqttData( final long traceId = data.traceId(); final long authorization = data.authorization(); final long budgetId = data.budgetId(); - final int reserved = data.reserved(); final int flags = data.flags(); + final int reserved = data.reserved(); + final OctetsFW extension = data.extension(); final OctetsFW payload = data.payload(); assert acknowledge <= sequence; @@ -301,29 +430,153 @@ private void onMqttData( final int offset = payload.offset(); final int limit = payload.limit(); - MqttSessionStateFW sessionState = mqttSessionStateRO.tryWrap(buffer, offset, limit); - - Flyweight kafkaDataEx = kafkaDataExRW - .wrap(extBuffer, 0, extBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> m - .deferred(0) - .timestamp(now().toEpochMilli()) - .partition(p -> 
p.partitionId(-1).partitionOffset(-1)) - .key(b -> b.length(clientId.length()) - .value(clientId.value(), 0, clientId.length()))) - .build(); - if (sessionState != null) - { - session.doKafkaData(traceId, authorization, budgetId, reserved, flags, sessionState, kafkaDataEx); - } - else + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final MqttDataExFW mqttDataEx = + dataEx != null && dataEx.typeId() == mqttTypeId ? extension.get(mqttDataExRO::tryWrap) : null; + final MqttSessionDataExFW mqttSessionDataEx = + mqttDataEx != null && mqttDataEx.kind() == MqttDataExFW.KIND_SESSION ? mqttDataEx.session() : null; + + Flyweight kafkaDataEx; + Flyweight kafkaPayload; + if (mqttSessionDataEx != null) { - session.doKafkaData(traceId, authorization, budgetId, reserved, flags, EMPTY_OCTETS, kafkaDataEx); + switch (mqttSessionDataEx.kind().get()) + { + case WILL: + if (lifetimeId == null) + { + lifetimeId = supplyLifetimeId.get(); + } + this.willId = supplyWillId.get(); + + String16FW key = new String16FW.Builder().wrap(willKeyBuffer, 0, willKeyBuffer.capacity()) + .set(clientId.asString() + WILL_KEY_POSTFIX + lifetimeId, StandardCharsets.UTF_8).build(); + kafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(key.length()) + .value(key.value(), 0, key.length())) + .hashKey(b -> b.length(clientId.length()) + .value(clientId.value(), 0, clientId.length()))) + .build(); + + MqttWillMessageFW will = mqttWillRO.tryWrap(buffer, offset, limit); + this.delay = (int) Math.min(SECONDS.toMillis(will.delay()), sessionExpiryMillis); + final int expiryInterval = will.expiryInterval() == -1 ? 
-1 : + (int) TimeUnit.SECONDS.toMillis(will.expiryInterval()); + final MqttWillMessageFW.Builder willMessageBuilder = + mqttMessageRW.wrap(willMessageBuffer, 0, willMessageBuffer.capacity()) + .topic(will.topic()) + .delay(delay) + .qos(will.qos()) + .flags(will.flags()) + .expiryInterval(expiryInterval) + .contentType(will.contentType()) + .format(will.format()) + .responseTopic(will.responseTopic()) + .lifetimeId(lifetimeId) + .willId(willId) + .correlation(will.correlation()) + .properties(will.properties()) + .payload(will.payload()); + + kafkaPayload = willMessageBuilder.build(); + session.doKafkaData(traceId, authorization, budgetId, + kafkaPayload.sizeof(), flags, kafkaPayload, kafkaDataEx); + + + String16FW willSignalKey = new String16FW.Builder() + .wrap(willSignalKeyBuffer, 0, willSignalKeyBuffer.capacity()) + .set(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); + Flyweight willSignalKafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(willSignalKey.length()) + .value(willSignalKey.value(), 0, willSignalKey.length())) + .hashKey(b -> b.length(clientId.length()) + .value(clientId.value(), 0, clientId.length())) + .headersItem(h -> + h.nameLen(TYPE_HEADER_NAME.length()) + .name(TYPE_HEADER_NAME.value(), 0, TYPE_HEADER_NAME.length()) + .valueLen(WILL_SIGNAL_NAME.length()) + .value(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length()))) + .build(); + + final MqttWillSignalFW willSignal = + mqttWillSignalRW.wrap(willSignalBuffer, 0, willSignalBuffer.capacity()) + .clientId(clientId) + .delay(delay) + .deliverAt(MqttWillDeliverAt.UNKNOWN.value()) + .lifetimeId(lifetimeId) + .willId(willId) + .instanceId(instanceId.instanceId()) + .build(); + + session.doKafkaData(traceId, authorization, budgetId, willSignal.sizeof(), flags, + willSignal, 
willSignalKafkaDataEx); + + doFlushProduceAndFetchWithFilter(traceId, authorization, budgetId); + break; + case STATE: + kafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(clientId.length()) + .value(clientId.value(), 0, clientId.length()))) + .build(); + + kafkaPayload = payload.sizeof() > 0 ? mqttSessionStateRO.wrap(buffer, offset, limit) : EMPTY_OCTETS; + + session.doKafkaData(traceId, authorization, budgetId, + reserved, flags, kafkaPayload, kafkaDataEx); + break; + } } } + private void doFlushProduceAndFetchWithFilter( + long traceId, + long authorization, + long budgetId) + { + final KafkaFlushExFW kafkaFlushEx = + kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> + { + m.capabilities(c -> c.set(KafkaCapabilities.PRODUCE_AND_FETCH)); + m.filtersItem(f -> f.conditionsItem(ci -> + ci.key(kb -> kb.length(clientId.length()) + .value(clientId.value(), 0, clientId.length())))); + m.filtersItem(f -> + { + f.conditionsItem(ci -> + ci.key(kb -> kb.length(clientIdMigrate.length()) + .value(clientIdMigrate.value(), 0, clientIdMigrate.length()))); + f.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> + h.nameLen(SENDER_ID_NAME.length()) + .name(SENDER_ID_NAME.value(), 0, SENDER_ID_NAME.length()) + .valueLen(sessionId.length()) + .value(sessionId.value(), 0, sessionId.length()))))); + }); + }) + .build(); + + session.doKafkaFlush(traceId, authorization, budgetId, 0, kafkaFlushEx); + } private void onMqttEnd( EndFW end) @@ -341,8 +594,57 @@ private void onMqttEnd( assert initialAck <= initialSeq; - session.doKafkaEnd(traceId, initialSeq, authorization); - group.doKafkaEnd(traceId, initialSeq, authorization); + if (isSetWillFlag(sessionFlags)) + { + // Cleanup will message + will signal + String16FW 
key = new String16FW.Builder().wrap(willKeyBuffer, 0, willKeyBuffer.capacity()) + .set(clientId.asString() + WILL_KEY_POSTFIX + lifetimeId, StandardCharsets.UTF_8).build(); + Flyweight kafkaWillDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(key.length()) + .value(key.value(), 0, key.length())) + .hashKey(b -> b.length(clientId.length()) + .value(clientId.value(), 0, clientId.length()))) + .build(); + + session.doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, + null, kafkaWillDataEx); + + String16FW willSignalKey = new String16FW.Builder() + .wrap(willSignalKeyBuffer, 0, willSignalKeyBuffer.capacity()) + .set(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); + Flyweight willSignalKafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(willSignalKey.length()) + .value(willSignalKey.value(), 0, willSignalKey.length())) + .hashKey(b -> b.length(clientId.length()) + .value(clientId.value(), 0, clientId.length())) + .headersItem(h -> + h.nameLen(TYPE_HEADER_NAME.length()) + .name(TYPE_HEADER_NAME.value(), 0, TYPE_HEADER_NAME.length()) + .valueLen(WILL_SIGNAL_NAME.length()) + .value(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length()))) + .build(); + + session.doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, + null, willSignalKafkaDataEx); + } + + session.doKafkaEnd(traceId, authorization); + if (group != null) + { + group.doKafkaEnd(traceId, authorization); + } } private void onMqttAbort( @@ -361,8 +663,15 @@ private void onMqttAbort( assert initialAck <= initialSeq; + if (isSetWillFlag(sessionFlags)) + { + session.sendWillSignal(traceId, 
authorization); + } session.doKafkaAbort(traceId, authorization); - group.doKafkaAbort(traceId, authorization); + if (group != null) + { + group.doKafkaAbort(traceId, authorization); + } } private void onMqttReset( @@ -385,7 +694,10 @@ private void onMqttReset( assert replyAck <= replySeq; session.doKafkaReset(traceId); - group.doKafkaReset(traceId); + if (group != null) + { + group.doKafkaReset(traceId); + } } private void onMqttWindow( @@ -399,6 +711,7 @@ private void onMqttWindow( final long budgetId = window.budgetId(); final int padding = window.padding(); final int capabilities = window.capabilities(); + final boolean wasOpen = MqttKafkaState.replyOpened(state); assert acknowledge <= sequence; assert sequence <= replySeq; @@ -413,7 +726,7 @@ private void onMqttWindow( assert replyAck <= replySeq; session.doKafkaWindow(traceId, authorization, budgetId, capabilities); - if (sequence == 0) + if (!wasOpen && group != null) { group.doKafkaWindow(traceId, authorization, budgetId, padding, capabilities); } @@ -526,124 +839,81 @@ private void doMqttReset( } } - private abstract class KafkaSessionProxy + public final class KafkaWillProxy { - protected MessageConsumer kafka; - protected final long originId; - protected final long routedId; - protected long initialId; - protected long replyId; - protected final MqttSessionProxy delegate; - - protected int state; + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final String16FW sessionsTopic; + private final String16FW messagesTopic; + private final String16FW retainedTopic; + private final Object2ObjectHashMap willFetchers; - protected long initialSeq; - protected long initialAck; - protected int initialMax; + private IntHashSet partitions; + private int state; - protected long replySeq; - protected long replyAck; - protected int replyMax; - protected int replyPad; + private long replySeq; + private long 
replyAck; + private int replyMax; + private long reconnectAt; - private KafkaSessionProxy( + private KafkaWillProxy( long originId, long routedId, - MqttSessionProxy delegate) + String16FW sessionsTopic, + String16FW messagesTopic, + String16FW retainedTopic) { this.originId = originId; this.routedId = routedId; - this.delegate = delegate; this.initialId = supplyInitialId.applyAsLong(routedId); + this.sessionsTopic = sessionsTopic; + this.messagesTopic = messagesTopic; + this.retainedTopic = retainedTopic; this.replyId = supplyReplyId.applyAsLong(initialId); - } + this.willFetchers = new Object2ObjectHashMap<>(); + this.partitions = new IntHashSet(); - private void doKafkaBeginIfNecessary( - long traceId, - long authorization, - long affinity, - String16FW clientId, - String16FW clientIdMigrate, - String16FW sessionIdentifier) - { - if (!MqttKafkaState.initialOpening(state)) - { - doKafkaBegin(traceId, authorization, affinity, clientId, clientIdMigrate, sessionIdentifier); - } } private void doKafkaBegin( - long traceId, - long authorization, - long affinity, - String16FW clientId, - String16FW clientIdMigrate, - String16FW sessionIdentifier) + long timeMillis) { - assert state == 0; - - this.initialId = supplyInitialId.applyAsLong(routedId); - this.replyId = supplyReplyId.applyAsLong(initialId); - - state = MqttKafkaState.openingInitial(state); - - kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, delegate.sessionsTopic, clientId, clientIdMigrate, sessionIdentifier); - } - - private void doKafkaData( - long traceId, - long authorization, - long budgetId, - int reserved, - int flags, - OctetsFW payload, - Flyweight extension) - { - - doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, flags, reserved, payload, extension); - - initialSeq += reserved; - - assert initialSeq <= initialAck + initialMax; + 
this.reconnectAt = signaler.signalAt( + timeMillis, + SIGNAL_CONNECT_WILL_STREAM, + this::onSignalConnectWillStream); } - private void doKafkaData( + private void doKafkaBegin( long traceId, long authorization, - long budgetId, - int reserved, - int flags, - MqttSessionStateFW sessionState, - Flyweight extension) + long affinity) { - final DirectBuffer buffer = sessionState.buffer(); - final int offset = sessionState.offset(); - final int limit = sessionState.limit(); - final int length = limit - offset; - - doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, flags, reserved, buffer, offset, length, extension); + reconnectAttempt = 0; + willFetchers.values().forEach(f -> f.cleanup(traceId, authorization)); + willFetchers.clear(); - initialSeq += reserved; + state = MqttKafkaState.openingInitial(state); - assert initialSeq <= initialAck + initialMax; + kafka = newWillStream(this::onWillMessage, originId, routedId, initialId, 0, 0, 0, + traceId, authorization, affinity, sessionsTopic); } private void doKafkaEnd( long traceId, - long sequence, long authorization) { if (!MqttKafkaState.initialClosed(state)) { - initialSeq = delegate.initialSeq; - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; state = MqttKafkaState.closeInitial(state); - doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + doEnd(kafka, originId, routedId, initialId, 0, 0, 0, traceId, authorization); + + signaler.cancel(reconnectAt); + reconnectAt = NO_CANCEL_ID; } } @@ -653,16 +923,13 @@ private void doKafkaAbort( { if (!MqttKafkaState.initialClosed(state)) { - initialSeq = delegate.initialSeq; - initialAck = delegate.initialAck; - initialMax = delegate.initialMax; state = MqttKafkaState.closeInitial(state); - doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + doAbort(kafka, originId, routedId, initialId, 
0, 0, 0, traceId, authorization); } } - private void onKafkaMessage( + private void onWillMessage( int msgTypeId, DirectBuffer buffer, int index, @@ -678,6 +945,10 @@ private void onKafkaMessage( final DataFW data = dataRO.wrap(buffer, index, index + length); onKafkaData(data); break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onKafkaFlush(flush); + break; case EndFW.TYPE_ID: final EndFW end = endRO.wrap(buffer, index, index + length); onKafkaEnd(end); @@ -686,14 +957,6 @@ private void onKafkaMessage( final AbortFW abort = abortRO.wrap(buffer, index, index + length); onKafkaAbort(abort); break; - case FlushFW.TYPE_ID: - final FlushFW flush = flushRO.wrap(buffer, index, index + length); - onKafkaFlush(flush); - break; - case WindowFW.TYPE_ID: - final WindowFW window = windowRO.wrap(buffer, index, index + length); - onKafkaWindow(window); - break; case ResetFW.TYPE_ID: final ResetFW reset = resetRO.wrap(buffer, index, index + length); onKafkaReset(reset); @@ -721,9 +984,7 @@ private void onKafkaBegin( state = MqttKafkaState.openingReply(state); assert replyAck <= replySeq; - - delegate.doMqttBegin(traceId, authorization, affinity); - doKafkaWindow(traceId, authorization, 0, 0); + doKafkaWindow(traceId, authorization, 0, 0, 0); } private void onKafkaData( @@ -733,6 +994,7 @@ private void onKafkaData( final long acknowledge = data.acknowledge(); final long traceId = data.traceId(); final long authorization = data.authorization(); + final long budgetId = data.budgetId(); final int reserved = data.reserved(); assert acknowledge <= sequence; @@ -741,30 +1003,141 @@ private void onKafkaData( replySeq = sequence + reserved; assert replyAck <= replySeq; - if (replySeq > replyAck + replyMax) { doKafkaReset(traceId); - delegate.doMqttAbort(traceId, authorization); } else { - handleKafkaData(data); + final OctetsFW extension = data.extension(); + final OctetsFW payload = data.payload(); + final ExtensionFW dataEx = 
extension.get(extensionRO::tryWrap); + final KafkaDataExFW kafkaDataEx = + dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; + final KafkaMergedDataExFW kafkaMergedDataEx = + kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; + final KafkaKeyFW key = kafkaMergedDataEx != null ? kafkaMergedDataEx.key() : null; + + fetchWill: + if (key != null) + { + if (payload == null) + { + final String clientId0 = key.value() + .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o - WILL_SIGNAL_KEY_POSTFIX.length())); + String16FW clientId = new String16FW(clientId0); + if (willDeliverIds.containsKey(clientId)) + { + willDeliverIds.get(clientId).forEach(signaler::cancel); + KafkaFetchWillProxy willFetcher = willFetchers.get(clientId); + if (willFetcher != null) + { + willFetcher.cleanup(traceId, authorization); + } + } + break fetchWill; + } + MqttWillSignalFW willSignal = + mqttWillSignalRO.tryWrap(payload.buffer(), payload.offset(), payload.limit()); + + if (willSignal != null) + { + long deliverAt = willSignal.deliverAt(); + final String16FW clientId = willSignal.clientId(); + + if (deliverAt == MqttWillDeliverAt.UNKNOWN.value()) + { + if (!instanceId.instanceId().equals(willSignal.instanceId())) + { + deliverAt = supplyTime.getAsLong() + willSignal.delay(); + } + else + { + break fetchWill; + } + } + + KafkaFetchWillProxy willFetcher = new KafkaFetchWillProxy(originId, routedId, this, sessionsTopic, + clientId, willSignal.willId().asString(), willSignal.lifetimeId().asString(), deliverAt); + willFetcher.doKafkaBegin(traceId, authorization, 0, willSignal.lifetimeId()); + willFetchers.put(clientId, willFetcher); + } + } } } - protected abstract void handleKafkaData(DataFW data); + private void onKafkaFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); - protected abstract void onKafkaWindow(WindowFW window); + 
assert acknowledge <= sequence; + assert sequence >= replySeq; - protected void onKafkaEnd( - EndFW end) + replySeq = sequence; + + assert replyAck <= replySeq; + + final OctetsFW extension = flush.extension(); + final ExtensionFW flushEx = extension.get(extensionRO::tryWrap); + final KafkaFlushExFW kafkaFlushEx = + flushEx != null && flushEx.typeId() == kafkaTypeId ? extension.get(kafkaFlushExRO::tryWrap) : null; + final KafkaMergedFlushExFW kafkaMergedFlushEx = + kafkaFlushEx != null && kafkaFlushEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaFlushEx.merged() : null; + final Array32FW progress = kafkaMergedFlushEx != null ? kafkaMergedFlushEx.progress() : null; + + if (progress != null) + { + final IntHashSet newPartitions = new IntHashSet(); + progress.forEach(p -> newPartitions.add(p.partitionId())); + if (!newPartitions.equals(partitions)) + { + instanceId.regenerate(); + partitions = newPartitions; + } + } + } + + private void onSignalConnectWillStream( + int signalId) { + assert signalId == SIGNAL_CONNECT_WILL_STREAM; + + this.reconnectAt = NO_CANCEL_ID; + doKafkaBegin(supplyTraceId.get(), 0, 0); } - protected void onKafkaFlush( - FlushFW flush) + private void onKafkaEnd( + EndFW end) { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + doKafkaEnd(traceId, authorization); + + if (reconnectDelay != 0) + { + if (reconnectAt != NO_CANCEL_ID) + { + signaler.cancel(reconnectAt); + } + + reconnectAt = signaler.signalAt( + currentTimeMillis() + SECONDS.toMillis(reconnectDelay), + SIGNAL_CONNECT_WILL_STREAM, + this::onSignalConnectWillStream); + } } private void onKafkaAbort( @@ -783,30 +1156,20 @@ private void onKafkaAbort( assert replyAck <= replySeq; - 
delegate.doMqttAbort(traceId, authorization); - } + doKafkaAbort(traceId, authorization); - protected void sendMigrateSignal(long authorization, long traceId) - { - Flyweight kafkaMigrateDataEx = kafkaDataExRW - .wrap(extBuffer, 0, extBuffer.capacity()) - .typeId(kafkaTypeId) - .merged(m -> m - .deferred(0) - .timestamp(now().toEpochMilli()) - .partition(p -> p.partitionId(-1).partitionOffset(-1)) - .key(b -> b.length(delegate.clientIdMigrate.length()) - .value(delegate.clientIdMigrate.value(), 0, delegate.clientIdMigrate.length())) - .hashKey(b -> b.length(delegate.clientId.length()) - .value(delegate.clientId.value(), 0, delegate.clientId.length())) - .headersItem(c -> c.nameLen(SENDER_ID_NAME.length()) - .name(SENDER_ID_NAME.value(), 0, SENDER_ID_NAME.length()) - .valueLen(delegate.sessionId.length()) - .value(delegate.sessionId.value(), 0, delegate.sessionId.length()))) - .build(); + if (reconnectDelay != 0) + { + if (reconnectAt != NO_CANCEL_ID) + { + signaler.cancel(reconnectAt); + } - doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, - EMPTY_OCTETS, kafkaMigrateDataEx); + reconnectAt = signaler.signalAt( + currentTimeMillis() + SECONDS.toMillis(reconnectDelay), + SIGNAL_CONNECT_WILL_STREAM, + this::onSignalConnectWillStream); + } } private void onKafkaReset( @@ -814,16 +1177,1188 @@ private void onKafkaReset( { final long sequence = reset.sequence(); final long acknowledge = reset.acknowledge(); - final long traceId = reset.traceId(); assert acknowledge <= sequence; - assert acknowledge >= delegate.initialAck; - delegate.initialAck = acknowledge; - - assert delegate.initialAck <= delegate.initialSeq; + if (reconnectDelay != 0) + { + if (reconnectAt != NO_CANCEL_ID) + { + signaler.cancel(reconnectAt); + } - delegate.doMqttReset(traceId); + reconnectAt = signaler.signalAt( + currentTimeMillis() + Math.min(50 << reconnectAttempt++, SECONDS.toMillis(reconnectDelay)), + SIGNAL_CONNECT_WILL_STREAM, + this::onSignalConnectWillStream); + } + } + + 
private void doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + replyMax = 8192; + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, 0, capabilities); + } + + + private void doKafkaData( + long traceId, + long authorization, + Flyweight extension) + { + + doData(kafka, originId, routedId, initialId, 0, 0, 0, + traceId, authorization, 0, DATA_FLAG_COMPLETE, 0, null, extension); + } + } + + private final class KafkaFetchWillProxy + { + private final KafkaWillProxy delegate; + private final String16FW topic; + private final String16FW clientId; + private final String lifetimeId; + private final String willId; + private final long deliverAt; + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private int dataSlot = NO_SLOT; + private int messageSlotOffset; + private int messageSlotReserved; + private KafkaProduceWillProxy willProducer; + private KafkaProduceWillProxy willRetainProducer; + private int willMessageAckCount; + + private KafkaFetchWillProxy( + long originId, + long routedId, + KafkaWillProxy delegate, + String16FW topic, + String16FW clientId, + String willId, + String lifetimeId, + long deliverAt) + { + this.originId = originId; + this.routedId = routedId; + this.delegate = delegate; + this.topic = topic; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = 
supplyReplyId.applyAsLong(initialId); + this.clientId = clientId; + this.willId = willId; + this.lifetimeId = lifetimeId; + this.deliverAt = deliverAt; + } + + private void doKafkaBegin( + long traceId, + long authorization, + long affinity, + String16FW lifetimeId) + { + if (!MqttKafkaState.initialOpening(state)) + { + state = MqttKafkaState.openingInitial(state); + + kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, clientId, lifetimeId, topic); + } + } + + private void cleanup( + long traceId, + long authorization) + { + doKafkaEnd(traceId, authorization); + if (willProducer != null) + { + willProducer.doKafkaEnd(traceId, authorization); + } + if (willRetainProducer != null) + { + willRetainProducer.doKafkaEnd(traceId, authorization); + } + bufferPool.release(dataSlot); + dataSlot = NO_SLOT; + messageSlotOffset = 0; + } + + private void doKafkaEnd( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + delegate.willFetchers.remove(clientId); + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (MqttKafkaState.initialOpened(state) && !MqttKafkaState.initialClosed(state)) + { + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void onKafkaMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onKafkaData(data); + break; + case FlushFW.TYPE_ID: + final FlushFW 
flush = flushRO.wrap(buffer, index, index + length); + onKafkaFlush(flush); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onKafkaWindow(window); + break; + } + } + + private void onKafkaBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + assert acknowledge >= replyAck; + + replySeq = sequence; + replyAck = acknowledge; + replyMax = maximum; + state = MqttKafkaState.openingReply(state); + + assert replyAck <= replySeq; + + doKafkaWindow(traceId, authorization, 0, 0, 0); + } + + private void onKafkaData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + + assert replyAck <= replySeq; + + if (replySeq > replyAck + replyMax) + { + doKafkaReset(traceId); + } + else + { + final OctetsFW extension = data.extension(); + final OctetsFW payload = data.payload(); + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final KafkaDataExFW kafkaDataEx = + dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; + final KafkaMergedDataExFW kafkaMergedDataEx = + kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; + final KafkaKeyFW key = kafkaMergedDataEx != null ? 
kafkaMergedDataEx.key() : null; + + if (key != null && payload != null) + { + MqttWillMessageFW willMessage = + mqttWillRO.tryWrap(payload.buffer(), payload.offset(), payload.limit()); + + if (willId.equals(willMessage.willId().asString())) + { + if (dataSlot == NO_SLOT) + { + dataSlot = bufferPool.acquire(initialId); + } + + if (dataSlot == NO_SLOT) + { + doKafkaAbort(traceId, authorization); + } + + + final MutableDirectBuffer dataBuffer = bufferPool.buffer(dataSlot); + dataBuffer.putBytes(0, willMessage.buffer(), willMessage.offset(), willMessage.sizeof()); + + messageSlotReserved = willMessage.sizeof(); + + willProducer = + new KafkaProduceWillProxy(originId, routedId, this, delegate.messagesTopic, deliverAt); + willProducer.doKafkaBegin(traceId, authorization, 0); + willMessageAckCount++; + if ((willMessage.flags() & 1 << MqttPublishFlags.RETAIN.value()) != 0) + { + willRetainProducer = + new KafkaProduceWillProxy(originId, routedId, this, delegate.retainedTopic, deliverAt); + willRetainProducer.doKafkaBegin(traceId, authorization, 0); + willMessageAckCount++; + } + } + else + { + doKafkaEnd(traceId, authorization); + } + } + } + } + + private void onKafkaFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + + assert replyAck <= replySeq; + + doKafkaEnd(traceId, authorization); + } + + private void onKafkaWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + + assert acknowledge <= sequence; + + initialAck = acknowledge; + initialMax = maximum; + state = MqttKafkaState.openInitial(state); + + assert initialAck <= initialSeq; + } + + private void doKafkaReset( + long traceId) + { + if 
(MqttKafkaState.initialOpened(state) && !MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + replyMax = bufferPool.slotCapacity(); + + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, replyPad, capabilities); + } + + private void onWillMessageAcked( + long traceId, + long authorization) + { + if (--willMessageAckCount == 0) + { + bufferPool.release(dataSlot); + dataSlot = NO_SLOT; + messageSlotOffset = 0; + + // Cleanup will message + will signal + String16FW key = new String16FW.Builder().wrap(willKeyBuffer, 0, willKeyBuffer.capacity()) + .set(clientId.asString() + WILL_KEY_POSTFIX + lifetimeId, StandardCharsets.UTF_8).build(); + Flyweight kafkaWillDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(key.length()) + .value(key.value(), 0, key.length())) + .hashKey(b -> b.length(clientId.length()) + .value(clientId.value(), 0, clientId.length()))) + .build(); + + delegate.doKafkaData(traceId, authorization, kafkaWillDataEx); + + String16FW willSignalKey = new String16FW.Builder() + .wrap(willSignalKeyBuffer, 0, willSignalKeyBuffer.capacity()) + .set(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); + Flyweight willSignalKafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(willSignalKey.length()) + .value(willSignalKey.value(), 0, willSignalKey.length())) 
+ .hashKey(b -> b.length(clientId.length()) + .value(clientId.value(), 0, clientId.length())) + .headersItem(h -> + h.nameLen(TYPE_HEADER_NAME.length()) + .name(TYPE_HEADER_NAME.value(), 0, TYPE_HEADER_NAME.length()) + .valueLen(WILL_SIGNAL_NAME.length()) + .value(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length()))) + .build(); + + delegate.doKafkaData(traceId, authorization, willSignalKafkaDataEx); + + doKafkaEnd(traceId, authorization); + } + } + } + + private final class KafkaProduceWillProxy + { + private MessageConsumer kafka; + private final long originId; + private final long routedId; + private final long initialId; + private final String16FW kafkaTopic; + private final long deliverAt; + private final long replyId; + private final KafkaFetchWillProxy delegate; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaProduceWillProxy( + long originId, + long routedId, + KafkaFetchWillProxy delegate, + String16FW kafkaTopic, + long deliverAt) + { + this.originId = originId; + this.routedId = routedId; + this.delegate = delegate; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.kafkaTopic = kafkaTopic; + this.deliverAt = deliverAt; + this.replyId = supplyReplyId.applyAsLong(initialId); + } + + private void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.openingInitial(state); + + kafka = newKafkaStream(this::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, kafkaTopic); + } + + private void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + doData(kafka, 
originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doKafkaEnd( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int capabilities) + { + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding, replyPad, capabilities); + } + + private void onKafkaMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onKafkaData(data); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onKafkaWindow(window); + break; + case SignalFW.TYPE_ID: + final SignalFW signal = signalRO.wrap(buffer, index, index + length); + onSignal(signal); + break; + } + } + + private void onSignal(SignalFW signal) + { + final int signalId = signal.signalId(); + + switch (signalId) + { + case SIGNAL_DELIVER_WILL_MESSAGE: + onWillDeliverSignal(signal); + break; + default: + break; + } + } + + private void onKafkaBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert 
sequence >= replySeq; + assert acknowledge >= replyAck; + + replySeq = sequence; + replyAck = acknowledge; + replyMax = maximum; + state = MqttKafkaState.openingReply(state); + + assert replyAck <= replySeq; + + doKafkaWindow(traceId, authorization, 0, 0, 0); + } + + private void onKafkaData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + doKafkaReset(traceId); + } + + private void onKafkaWindow( + WindowFW window) + { + final long traceId = window.traceId(); + final long authorization = window.authorization(); + final long budgetId = window.budgetId(); + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final boolean wasOpen = MqttKafkaState.initialOpened(state); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + assert maximum >= delegate.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + state = MqttKafkaState.openInitial(state); + + assert initialAck <= initialSeq; + + if (!wasOpen) + { + final long signalId = + signaler.signalAt(deliverAt, originId, routedId, initialId, SIGNAL_DELIVER_WILL_MESSAGE, 0); + willDeliverIds.computeIfAbsent(delegate.clientId, k -> new LongArrayList()).add(signalId); + } + if (initialAck == delegate.messageSlotReserved) + { + doKafkaEnd(traceId, authorization); + delegate.onWillMessageAcked(traceId, authorization); + } + } + + + private void onWillDeliverSignal(SignalFW signal) + { + sendWill(signal.traceId(), signal.authorization(), 0); + willDeliverIds.remove(delegate.clientId); + } + + private void sendWill( + long traceId, + long 
authorization, + long budgetId) + { + final MutableDirectBuffer dataBuffer = bufferPool.buffer(delegate.dataSlot); + // TODO: data fragmentation + final MqttWillMessageFW will = mqttWillRO.wrap(dataBuffer, delegate.messageSlotOffset, dataBuffer.capacity()); + + Flyweight kafkaDataEx; + + kafkaHeadersRW.wrap(kafkaHeadersBuffer, 0, kafkaHeadersBuffer.capacity()); + + + String topicName = will.topic().asString(); + assert topicName != null; + + final DirectBuffer topicNameBuffer = will.topic().value(); + + final MutableDirectBuffer keyBuffer = new UnsafeBuffer(new byte[topicNameBuffer.capacity() + 4]); + final KafkaKeyFW key = new KafkaKeyFW.Builder() + .wrap(keyBuffer, 0, keyBuffer.capacity()) + .length(topicNameBuffer.capacity()) + .value(topicNameBuffer, 0, topicNameBuffer.capacity()) + .build(); + + String[] topicHeaders = topicName.split("/"); + for (String header : topicHeaders) + { + String16FW topicHeader = new String16FW(header); + addHeader(helper.kafkaFilterHeaderName, topicHeader); + } + + if (will.expiryInterval() != -1) + { + final MutableDirectBuffer expiryBuffer = new UnsafeBuffer(new byte[4]); + expiryBuffer.putInt(0, will.expiryInterval(), ByteOrder.BIG_ENDIAN); + kafkaHeadersRW.item(h -> + { + h.nameLen(helper.kafkaTimeoutHeaderName.sizeof()); + h.name(helper.kafkaTimeoutHeaderName); + h.valueLen(4); + h.value(expiryBuffer, 0, expiryBuffer.capacity()); + }); + } + + if (will.contentType().length() != -1) + { + addHeader(helper.kafkaContentTypeHeaderName, will.contentType()); + } + + if (will.payload().sizeof() != 0 && will.format() != null) + { + addHeader(helper.kafkaFormatHeaderName, will.format()); + } + + if (will.responseTopic().length() != -1) + { + final String16FW responseTopic = will.responseTopic(); + addHeader(helper.kafkaReplyToHeaderName, kafkaTopic); + addHeader(helper.kafkaReplyKeyHeaderName, responseTopic); + + addFiltersHeader(responseTopic); + } + + if (will.correlation().bytes() != null) + { + 
addHeader(helper.kafkaCorrelationHeaderName, will.correlation().bytes()); + } + + will.properties().forEach(property -> + addHeader(property.key(), property.value())); + + kafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.set(key)) + .headers(kafkaHeadersRW.build())) + .build(); + + doKafkaData(traceId, authorization, budgetId, will.sizeof(), DATA_FLAG_COMPLETE, will.payload().bytes(), kafkaDataEx); + } + + private void doKafkaReset( + long traceId) + { + if (!MqttKafkaState.replyClosed(state)) + { + state = MqttKafkaState.closeReply(state); + + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId); + } + } + + private void addHeader( + OctetsFW key, + OctetsFW value) + { + kafkaHeadersRW.item(h -> + { + h.nameLen(key.sizeof()); + h.name(key); + h.valueLen(value.sizeof()); + h.value(value); + }); + } + + private void addFiltersHeader( + String16FW responseTopic) + { + final DirectBuffer responseBuffer = responseTopic.value(); + final int capacity = responseBuffer.capacity(); + + int offset = 0; + int matchAt = 0; + while (offset >= 0 && offset < capacity && matchAt != -1) + { + matchAt = indexOfByte(responseBuffer, offset, capacity, SLASH_BYTE); + if (matchAt != -1) + { + addHeader(helper.kafkaReplyFilterHeaderName, responseBuffer, offset, matchAt - offset); + offset = matchAt + 1; + } + } + addHeader(helper.kafkaReplyFilterHeaderName, responseBuffer, offset, capacity - offset); + } + + private void addHeader( + OctetsFW key, + MqttPayloadFormatFW format) + { + String16FW value = format.get() == MqttPayloadFormat.BINARY ? 
binaryFormat : textFormat; + addHeader(key, value); + } + + private void addHeader( + OctetsFW key, + String16FW value) + { + DirectBuffer buffer = value.value(); + kafkaHeadersRW.item(h -> + { + h.nameLen(key.sizeof()); + h.name(key); + h.valueLen(value.length()); + h.value(buffer, 0, buffer.capacity()); + }); + } + + private void addHeader( + OctetsFW key, + DirectBuffer buffer, + int offset, + int length) + { + kafkaHeadersRW.item(h -> + { + h.nameLen(key.sizeof()); + h.name(key); + h.valueLen(length); + h.value(buffer, offset, length); + }); + } + + private void addHeader(String16FW key, String16FW value) + { + DirectBuffer keyBuffer = key.value(); + DirectBuffer valueBuffer = value.value(); + kafkaHeadersRW.item(h -> + { + h.nameLen(key.length()); + h.name(keyBuffer, 0, keyBuffer.capacity()); + h.valueLen(value.length()); + h.value(valueBuffer, 0, valueBuffer.capacity()); + }); + } + } + + private static int indexOfByte( + DirectBuffer buffer, + int offset, + int limit, + byte value) + { + int byteAt = -1; + for (int index = offset; index < limit; index++) + { + if (buffer.getByte(index) == value) + { + byteAt = index; + break; + } + } + return byteAt; + } + + private static boolean isSetWillFlag( + int flags) + { + return (flags & MqttSessionFlags.WILL.value() << 1) != 0; + } + + private static boolean isSetCleanStart( + int flags) + { + return (flags & MqttSessionFlags.CLEAN_START.value() << 1) != 0; + } + + private abstract class KafkaSessionProxy + { + protected MessageConsumer kafka; + protected final long originId; + protected final long routedId; + protected long initialId; + protected long replyId; + protected final MqttSessionProxy delegate; + + protected int state; + + protected long initialSeq; + protected long initialAck; + protected int initialMax; + + protected long replySeq; + protected long replyAck; + protected int replyMax; + protected int replyPad; + + private KafkaSessionProxy( + long originId, + long routedId, + MqttSessionProxy delegate) 
+ { + this.originId = originId; + this.routedId = routedId; + this.delegate = delegate; + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + } + + private void doKafkaBeginIfNecessary( + long traceId, + long authorization, + long affinity) + { + if (!MqttKafkaState.initialOpening(state)) + { + doKafkaBegin(traceId, authorization, affinity); + } + } + + protected void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void sendWillSignal( + long traceId, + long authorization) + { + String16FW willSignalKey = new String16FW.Builder() + .wrap(willSignalKeyBuffer, 0, willSignalKeyBuffer.capacity()) + .set(delegate.clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); + Flyweight willSignalKafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(willSignalKey.length()) + .value(willSignalKey.value(), 0, willSignalKey.length())) + .hashKey(b -> b.length(delegate.clientId.length()) + .value(delegate.clientId.value(), 0, delegate.clientId.length())) + .headersItem(h -> + h.nameLen(TYPE_HEADER_NAME.length()) + .name(TYPE_HEADER_NAME.value(), 0, TYPE_HEADER_NAME.length()) + .valueLen(WILL_SIGNAL_NAME.length()) + .value(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length()))) + .build(); + + final MqttWillSignalFW willSignal = + mqttWillSignalRW.wrap(willSignalBuffer, 0, willSignalBuffer.capacity()) + .clientId(delegate.clientId) + .delay(delegate.delay) + 
.deliverAt(supplyTime.getAsLong() + delegate.delay) + .lifetimeId(delegate.lifetimeId) + .willId(delegate.willId) + .instanceId(instanceId.instanceId()) + .build(); + + doKafkaData(traceId, authorization, 0, willSignal.sizeof(), DATA_FLAG_COMPLETE, + willSignal, willSignalKafkaDataEx); + } + + private void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + Flyweight payload, + Flyweight extension) + { + final DirectBuffer buffer = payload.buffer(); + final int offset = payload.offset(); + final int limit = payload.limit(); + final int length = limit - offset; + + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, buffer, offset, length, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doKafkaFlush( + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, extension); + } + + private void doKafkaEnd( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doEnd(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void doKafkaAbort( + long traceId, + long authorization) + { + if (!MqttKafkaState.initialClosed(state)) + { + initialSeq = delegate.initialSeq; + initialAck = delegate.initialAck; + initialMax = delegate.initialMax; + state = MqttKafkaState.closeInitial(state); + + doAbort(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization); + } + } + + private void onKafkaMessage( + int msgTypeId, + DirectBuffer 
buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onKafkaBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onKafkaData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onKafkaEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onKafkaAbort(abort); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onKafkaFlush(flush); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onKafkaWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onKafkaReset(reset); + break; + } + } + + private void onKafkaBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + assert acknowledge >= replyAck; + + replySeq = sequence; + replyAck = acknowledge; + replyMax = maximum; + state = MqttKafkaState.openingReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttBegin(traceId, authorization, affinity); + doKafkaWindow(traceId, authorization, 0, 0); + } + + private void onKafkaData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final int reserved = data.reserved(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert 
replyAck <= replySeq; + + if (replySeq > replyAck + replyMax) + { + doKafkaReset(traceId); + delegate.doMqttAbort(traceId, authorization); + } + else + { + handleKafkaData(data); + } + } + + protected abstract void doKafkaBegin( + long traceId, + long authorization, + long affinity); + + protected abstract void handleKafkaData( + DataFW data); + + protected void onKafkaWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + assert maximum >= delegate.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + state = MqttKafkaState.openInitial(state); + + assert initialAck <= initialSeq; + } + + protected void onKafkaEnd( + EndFW end) + { + } + + protected void onKafkaFlush( + FlushFW flush) + { + } + + private void onKafkaAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = MqttKafkaState.closeReply(state); + + assert replyAck <= replySeq; + + delegate.doMqttAbort(traceId, authorization); + } + + protected void sendMigrateSignal(long authorization, long traceId) + { + Flyweight kafkaMigrateDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(delegate.clientIdMigrate.length()) + .value(delegate.clientIdMigrate.value(), 0, delegate.clientIdMigrate.length())) + .hashKey(b -> b.length(delegate.clientId.length()) + .value(delegate.clientId.value(), 0, delegate.clientId.length())) + .headersItem(c -> c.nameLen(SENDER_ID_NAME.length()) + 
.name(SENDER_ID_NAME.value(), 0, SENDER_ID_NAME.length()) + .valueLen(delegate.sessionId.length()) + .value(delegate.sessionId.value(), 0, delegate.sessionId.length()))) + .build(); + + doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, + EMPTY_OCTETS, kafkaMigrateDataEx); + } + + private void onKafkaReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + + delegate.initialAck = acknowledge; + + assert delegate.initialAck <= delegate.initialSeq; + + delegate.doMqttReset(traceId); } private void doKafkaReset( @@ -862,6 +2397,21 @@ private KafkaSessionSignalProxy( super(originId, routedId, delegate); } + @Override + protected void doKafkaBegin(long traceId, long authorization, long affinity) + { + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + + state = MqttKafkaState.openingInitial(state); + + kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, delegate.sessionsTopic, null, delegate.clientIdMigrate, + delegate.sessionId, KafkaCapabilities.PRODUCE_AND_FETCH); + } + @Override protected void handleKafkaData(DataFW data) { @@ -878,7 +2428,7 @@ protected void handleKafkaData(DataFW data) kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; final KafkaKeyFW key = kafkaMergedDataEx != null ? 
kafkaMergedDataEx.key() : null; - if (key != null) + if (delegate.group != null && key != null) { delegate.group.doKafkaFlush(traceId, authorization, budgetId, reserved); } @@ -908,6 +2458,11 @@ protected void onKafkaWindow( if (!wasOpen) { sendMigrateSignal(authorization, traceId); + + final long routedId = delegate.session.routedId; + + delegate.group = new KafkaGroupProxy(originId, routedId, delegate); + delegate.group.doKafkaBegin(traceId, authorization, 0); } } } @@ -923,7 +2478,28 @@ private KafkaSessionStateProxy( } @Override - protected void handleKafkaData(DataFW data) + protected void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + + state = MqttKafkaState.openingInitial(state); + + KafkaCapabilities capabilities = isSetWillFlag(delegate.sessionFlags) ? + KafkaCapabilities.PRODUCE_ONLY : KafkaCapabilities.PRODUCE_AND_FETCH; + kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, delegate.sessionsTopic, delegate.clientId, delegate.clientIdMigrate, + delegate.sessionId, capabilities); + } + + @Override + protected void handleKafkaData( + DataFW data) { final long traceId = data.traceId(); final long authorization = data.authorization(); @@ -967,6 +2543,7 @@ protected void onKafkaWindow( final long budgetId = window.budgetId(); final int padding = window.padding(); final int capabilities = window.capabilities(); + final boolean wasOpen = MqttKafkaState.initialOpened(state); assert acknowledge <= sequence; assert acknowledge >= delegate.initialAck; @@ -978,7 +2555,39 @@ protected void onKafkaWindow( assert initialAck <= initialSeq; - delegate.doMqttWindow(authorization, traceId, budgetId, padding, capabilities); + if (!wasOpen && !isSetCleanStart(delegate.sessionFlags)) + { + 
cancelWillSignal(authorization, traceId); + } + + delegate.doMqttWindow(authorization, traceId, budgetId, padding + delegate.willPadding, capabilities); + } + + private void cancelWillSignal(long authorization, long traceId) + { + String16FW willSignalKey = new String16FW.Builder() + .wrap(willSignalKeyBuffer, 0, willSignalKeyBuffer.capacity()) + .set(delegate.clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); + Flyweight willSignalKafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(now().toEpochMilli()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(willSignalKey.length()) + .value(willSignalKey.value(), 0, willSignalKey.length())) + .hashKey(b -> b.length(delegate.clientId.length()) + .value(delegate.clientId.value(), 0, delegate.clientId.length())) + .headersItem(h -> + h.nameLen(TYPE_HEADER_NAME.length()) + .name(TYPE_HEADER_NAME.value(), 0, TYPE_HEADER_NAME.length()) + .valueLen(WILL_SIGNAL_NAME.length()) + .value(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length()))) + .build(); + + doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, + null, willSignalKafkaDataEx); } @Override @@ -999,7 +2608,7 @@ protected void onKafkaFlush( assert replyAck <= replySeq; - delegate.doMqttData(traceId, authorization, budgetId, reserved, DATA_FLAG_COMPLETE, EMPTY_OCTETS); + delegate.doMqttData(traceId, authorization, budgetId, 0, DATA_FLAG_COMPLETE, EMPTY_OCTETS); } @Override @@ -1023,6 +2632,76 @@ protected void onKafkaEnd( } } + private final class KafkaFetchWillSignalProxy extends KafkaSessionProxy + { + private KafkaFetchWillSignalProxy( + long originId, + long routedId, + MqttSessionProxy delegate) + { + super(originId, routedId, delegate); + } + + @Override + protected void doKafkaBegin( + long traceId, + long authorization, + long affinity) + { + if (!MqttKafkaState.initialOpening(state)) + { + state = 
MqttKafkaState.openingInitial(state); + + kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, delegate.sessionsTopic, delegate.clientId); + } + } + + @Override + protected void handleKafkaData( + DataFW data) + { + final OctetsFW extension = data.extension(); + final OctetsFW payload = data.payload(); + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final KafkaDataExFW kafkaDataEx = + dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; + final KafkaMergedDataExFW kafkaMergedDataEx = + kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; + final KafkaKeyFW key = kafkaMergedDataEx != null ? kafkaMergedDataEx.key() : null; + + if (key != null) + { + MqttWillSignalFW willMessage = + mqttWillSignalRO.tryWrap(payload.buffer(), payload.offset(), payload.limit()); + delegate.lifetimeId = willMessage.lifetimeId().asString(); + } + } + + @Override + protected void onKafkaFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorization = flush.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + + assert replyAck <= replySeq; + + delegate.session.doKafkaEnd(traceId, authorization); + final long routedId = delegate.session.routedId; + + delegate.session = new KafkaSessionSignalProxy(originId, routedId, delegate); + delegate.session.doKafkaBeginIfNecessary(traceId, authorization, 0); + } + } + private final class KafkaGroupProxy { private MessageConsumer kafka; @@ -1078,12 +2757,11 @@ private void doKafkaFlush( initialSeq = delegate.initialSeq; doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, budgetId, reserved); + traceId, authorization, 
budgetId, reserved, EMPTY_OCTETS); } private void doKafkaEnd( long traceId, - long sequence, long authorization) { if (!MqttKafkaState.initialClosed(state)) @@ -1165,6 +2843,7 @@ private void onKafkaBegin( assert replyAck <= replySeq; delegate.doMqttBegin(traceId, authorization, affinity); + doKafkaWindow(traceId, authorization, 0, 0, 0); } private void onKafkaData( @@ -1174,7 +2853,6 @@ private void onKafkaData( final long acknowledge = data.acknowledge(); final long traceId = data.traceId(); final long authorization = data.authorization(); - final long budgetId = data.budgetId(); final int reserved = data.reserved(); assert acknowledge <= sequence; @@ -1205,15 +2883,16 @@ private void onKafkaData( if (members > 1) { delegate.session.sendMigrateSignal(authorization, traceId); - doKafkaEnd(traceId, sequence, authorization); + delegate.session.sendWillSignal(authorization, traceId); + delegate.session.doKafkaEnd(traceId, authorization); + doKafkaEnd(traceId, authorization); } else { - delegate.session.doKafkaEnd(traceId, sequence, authorization); + delegate.session.doKafkaEnd(traceId, authorization); final long routedId = delegate.session.routedId; delegate.session = new KafkaSessionStateProxy(originId, routedId, delegate); - delegate.session.doKafkaBeginIfNecessary(traceId, authorization, 0, - delegate.clientId, delegate.clientIdMigrate, delegate.sessionId); + delegate.session.doKafkaBeginIfNecessary(traceId, authorization, 0); } } } @@ -1461,7 +3140,8 @@ private void doFlush( long traceId, long authorization, long budgetId, - int reserved) + int reserved, + Flyweight extension) { final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) .originId(originId) @@ -1474,6 +3154,7 @@ private void doFlush( .authorization(authorization) .budgetId(budgetId) .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) .build(); receiver.accept(flush.typeId(), flush.buffer(), flush.offset(), flush.sizeof()); @@ -1493,15 
+3174,17 @@ private MessageConsumer newKafkaStream( String16FW sessionsTopicName, String16FW clientId, String16FW clientIdMigrate, - String16FW sessionId) + String16FW sessionId, + KafkaCapabilities capabilities) { final KafkaBeginExFW kafkaBeginEx = kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) .typeId(kafkaTypeId) .merged(m -> { - m.capabilities(c -> c.set(KafkaCapabilities.PRODUCE_AND_FETCH)); + m.capabilities(c -> c.set(capabilities)); m.topic(sessionsTopicName); + m.groupId(MQTT_CLIENTS_GROUP_ID); if (clientId != null) { m.partitionsItem(p -> @@ -1548,6 +3231,209 @@ private MessageConsumer newKafkaStream( return receiver; } + private MessageConsumer newKafkaStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + String16FW topic) + { + final KafkaBeginExFW kafkaBeginEx = + kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m.capabilities(c -> c.set(KafkaCapabilities.PRODUCE_ONLY)) + .topic(topic) + .partitionsItem(p -> p.partitionId(-1).partitionOffset(-2L)) + .ackMode(b -> b.set(KAFKA_DEFAULT_ACK_MODE))) + .build(); + + + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(kafkaBeginEx.buffer(), kafkaBeginEx.offset(), kafkaBeginEx.sizeof()) + .build(); + + MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private MessageConsumer newKafkaStream( + MessageConsumer sender, + long 
originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + String16FW topic, + String16FW clientId) + { + String16FW key = new String16FW(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX); + final KafkaBeginExFW kafkaBeginEx = + kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> + m.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)) + .topic(topic) + .groupId(MQTT_CLIENTS_GROUP_ID) + .partitionsItem(p -> + p.partitionId(KafkaOffsetType.HISTORICAL.value()) + .partitionOffset(KafkaOffsetType.HISTORICAL.value())) + .filtersItem(f -> + f.conditionsItem(c -> + c.key(k -> k.length(key.length()) + .value(key.value(), 0, key.length())))) + .evaluation(b -> b.set(KafkaEvaluation.EAGER))) + .build(); + + + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(kafkaBeginEx.buffer(), kafkaBeginEx.offset(), kafkaBeginEx.sizeof()) + .build(); + + MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private MessageConsumer newKafkaStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + String16FW clientId, + String16FW lifetimeId, + String16FW topic) + { + String16FW key = new String16FW(clientId.asString() + WILL_KEY_POSTFIX + lifetimeId.asString()); + final KafkaBeginExFW kafkaBeginEx = + kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, 
writeBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> + m.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)) + .topic(topic) + .partitionsItem(p -> + p.partitionId(KafkaOffsetType.HISTORICAL.value()) + .partitionOffset(KafkaOffsetType.HISTORICAL.value())) + .filtersItem(f -> + f.conditionsItem(c -> + c.key(k -> k.length(key.length()) + .value(key.value(), 0, key.length())))) + .evaluation(b -> b.set(KafkaEvaluation.EAGER))) + .build(); + + + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(kafkaBeginEx.buffer(), kafkaBeginEx.offset(), kafkaBeginEx.sizeof()) + .build(); + + MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + + private MessageConsumer newWillStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + String16FW sessionsTopicName) + { + final KafkaBeginExFW kafkaBeginEx = + kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> + m.capabilities(c -> c.set(KafkaCapabilities.PRODUCE_AND_FETCH)) + .topic(sessionsTopicName) + .groupId(MQTT_CLIENTS_GROUP_ID) + .filtersItem(f -> + f.conditionsItem(c -> c.header(h -> + h.nameLen(TYPE_HEADER_NAME.length()) + .name(TYPE_HEADER_NAME.value(), 0, TYPE_HEADER_NAME.length()) + .valueLen(WILL_SIGNAL_NAME.length()) + .value(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length())))) + .ackMode(b -> b.set(KAFKA_DEFAULT_ACK_MODE))) + .build(); + + final BeginFW begin = 
beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(kafkaBeginEx.buffer(), kafkaBeginEx.offset(), kafkaBeginEx.sizeof()) + .build(); + + MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + private MessageConsumer newGroupStream( MessageConsumer sender, long originId, diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java index c1b2c181c2..cc7a4145c3 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java @@ -15,8 +15,15 @@ package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.INSTANCE_ID; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.LIFETIME_ID; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.MESSAGES_TOPIC; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.RETAINED_MESSAGES_TOPIC; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.SESSION_ID; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.TIME; +import static 
io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.WILL_AVAILABLE; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.WILL_ID; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.WILL_STREAM_RECONNECT_DELAY; import static org.junit.Assert.assertEquals; import org.junit.Test; @@ -25,11 +32,25 @@ public class MqttKafkaConfigurationTest { public static final String MESSAGES_TOPIC_NAME = "zilla.binding.mqtt.kafka.messages.topic"; public static final String RETAINED_MESSAGES_TOPIC_NAME = "zilla.binding.mqtt.kafka.retained.messages.topic"; + public static final String TIME_NAME = "zilla.binding.mqtt.kafka.time"; + public static final String WILL_AVAILABLE_NAME = "zilla.binding.mqtt.kafka.will.available"; + public static final String WILL_STREAM_RECONNECT_DELAY_NAME = "zilla.binding.mqtt.kafka.will.stream.reconnect"; + public static final String SESSION_ID_NAME = "zilla.binding.mqtt.kafka.session.id"; + public static final String WILL_ID_NAME = "zilla.binding.mqtt.kafka.will.id"; + public static final String LIFETIME_ID_NAME = "zilla.binding.mqtt.kafka.lifetime.id"; + public static final String INSTANCE_ID_NAME = "zilla.binding.mqtt.kafka.instance.id"; @Test public void shouldVerifyConstants() { assertEquals(MESSAGES_TOPIC.name(), MESSAGES_TOPIC_NAME); assertEquals(RETAINED_MESSAGES_TOPIC.name(), RETAINED_MESSAGES_TOPIC_NAME); + assertEquals(TIME.name(), TIME_NAME); + assertEquals(WILL_AVAILABLE.name(), WILL_AVAILABLE_NAME); + assertEquals(WILL_STREAM_RECONNECT_DELAY.name(), WILL_STREAM_RECONNECT_DELAY_NAME); + assertEquals(SESSION_ID.name(), SESSION_ID_NAME); + assertEquals(WILL_ID.name(), WILL_ID_NAME); + assertEquals(LIFETIME_ID.name(), LIFETIME_ID_NAME); + assertEquals(INSTANCE_ID.name(), INSTANCE_ID_NAME); } } diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java 
b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java index d9f2922ff9..4720b202df 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java @@ -14,6 +14,7 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.WILL_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_BUFFER_SLOT_CAPACITY; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -28,6 +29,7 @@ import io.aklivity.zilla.runtime.engine.test.EngineRule; import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; +import io.aklivity.zilla.runtime.engine.test.annotation.Configure; public class MqttKafkaPublishProxyIT { @@ -52,6 +54,7 @@ public class MqttKafkaPublishProxyIT @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.client.sent.abort/client", "${kafka}/publish.client.sent.abort/server"}) @@ -62,6 +65,7 @@ public void shouldReceiveClientSentAbort() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.client.sent.reset/client", "${kafka}/publish.client.sent.reset/server"}) @@ -72,6 +76,7 @@ public void shouldReceiveClientSentReset() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.server.sent.abort/client", "${kafka}/publish.server.sent.abort/server"}) @@ -82,6 +87,7 @@ public void shouldReceiveServerSentAbort() throws Exception 
@Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.server.sent.flush/client", "${kafka}/publish.server.sent.flush/server"}) @@ -92,6 +98,7 @@ public void shouldReceiveServerSentFlush() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.server.sent.reset/client", "${kafka}/publish.server.sent.reset/server"}) @@ -102,6 +109,7 @@ public void shouldReceiveServerSentReset() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.server.sent.data/client", "${kafka}/publish.server.sent.data/server"}) @@ -112,6 +120,7 @@ public void shouldAbortWhenServerSentData() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.retained.server.sent.abort/client", "${kafka}/publish.retained.server.sent.abort/server"}) @@ -122,6 +131,7 @@ public void shouldPublishRetainedThenReceiveServerSentAbort() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.retained.server.sent.flush/client", "${kafka}/publish.retained.server.sent.flush/server"}) @@ -132,6 +142,7 @@ public void shouldPublishRetainedThenReceiveServerSentFlush() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.retained.server.sent.reset/client", "${kafka}/publish.retained.server.sent.reset/server"}) @@ -142,6 +153,7 @@ public void shouldPublishRetainedThenReceiveServerSentReset() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.retained.server.sent.data/client", 
"${kafka}/publish.retained.server.sent.data/server"}) @@ -152,6 +164,7 @@ public void shouldPublishRetainedThenAbortWhenServerSentData() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.one.message/client", "${kafka}/publish.one.message/server"}) @@ -162,6 +175,7 @@ public void shouldSendOneMessage() throws Exception @Test @Configuration("proxy.options.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.one.message/client", "${kafka}/publish.one.message.changed.topic.name/server"}) @@ -172,6 +186,7 @@ public void shouldSendOneMessageWithChangedTopicName() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.retained/client", "${kafka}/publish.retained/server"}) @@ -182,6 +197,7 @@ public void shouldPublishRetainedMessage() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.empty.message/client", "${kafka}/publish.empty.message/server"}) @@ -192,6 +208,7 @@ public void shouldSendEmptyMessage() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.multiple.messages/client", "${kafka}/publish.multiple.messages/server"}) @@ -202,6 +219,7 @@ public void shouldSendMultipleMessages() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.multiple.clients/client", "${kafka}/publish.multiple.clients/server"}) @@ -222,6 +240,7 @@ public void shouldSendWithUserProperty() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.with.user.properties.distinct/client", 
"${kafka}/publish.with.user.properties.distinct/server"}) @@ -232,6 +251,7 @@ public void shouldSendWithDistinctUserProperties() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.with.user.properties.repeated/client", "${kafka}/publish.with.user.properties.repeated/server"}) diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java index 438cc4d612..cebf00d7e1 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java @@ -14,8 +14,15 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream; -import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.SESSION_ID; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.INSTANCE_ID_NAME; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.LIFETIME_ID_NAME; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.SESSION_ID_NAME; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.TIME_NAME; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.WILL_AVAILABLE_NAME; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.WILL_ID_NAME; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.WILL_STREAM_RECONNECT_DELAY_NAME; import static 
io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_BUFFER_SLOT_CAPACITY; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -29,6 +36,7 @@ import io.aklivity.zilla.runtime.engine.test.EngineRule; import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; +import io.aklivity.zilla.runtime.engine.test.annotation.Configure; public class MqttKafkaSessionProxyIT { @@ -44,7 +52,7 @@ public class MqttKafkaSessionProxyIT .responseBufferCapacity(1024) .counterValuesBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) - .configure(SESSION_ID, () -> "sender-1") + .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/kafka/config") .external("kafka0") .clean(); @@ -54,6 +62,9 @@ public class MqttKafkaSessionProxyIT @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.abort.reconnect.non.clean.start/client", "${kafka}/session.abort.reconnect.non.clean.start/server"}) @@ -64,6 +75,13 @@ public void shouldReconnectNonCleanStart() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") + @Configure(name = TIME_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") + @Configure(name = INSTANCE_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") @Specification({ "${mqtt}/session.client.takeover/client", 
"${kafka}/session.client.takeover/server"}) @@ -74,6 +92,13 @@ public void shouldTakeOverSession() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") + @Configure(name = TIME_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") + @Configure(name = INSTANCE_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") @Specification({ "${mqtt}/session.exists.clean.start/client", "${kafka}/session.exists.clean.start/server"}) @@ -84,6 +109,9 @@ public void shouldRemoveSessionAtCleanStart() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.subscribe/client", "${kafka}/session.subscribe/server"}) @@ -94,6 +122,9 @@ public void shouldSubscribeSaveSubscriptionsInSession() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.subscribe.via.session.state/client", "${kafka}/session.subscribe.via.session.state/server"}) @@ -104,6 +135,9 @@ public void shouldReceiveMessageSubscribedViaSessionState() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") 
@Specification({ "${mqtt}/session.unsubscribe.after.subscribe/client", "${kafka}/session.unsubscribe.after.subscribe/server"}) @@ -114,6 +148,9 @@ public void shouldUnsubscribeAndUpdateSessionState() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.unsubscribe.via.session.state/client", "${kafka}/session.unsubscribe.via.session.state/server"}) @@ -124,6 +161,9 @@ public void shouldUnsubscribeViaSessionState() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.client.sent.reset/client", "${kafka}/session.client.sent.reset/server"}) @@ -134,6 +174,9 @@ public void shouldSessionStreamReceiveClientSentReset() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.server.sent.reset/client", "${kafka}/session.server.sent.reset/server"}) @@ -144,6 +187,9 @@ public void shouldSessionStreamReceiveServerSentReset() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.server.sent.reset/client", "${kafka}/session.group.server.sent.reset/server"}) @@ -151,4 +197,201 @@ public void 
shouldGroupStreamReceiveServerSentReset() throws Exception { k3po.finish(); } + + @Test + @Configuration("proxy.yaml") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") + @Configure(name = LIFETIME_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") + @Configure(name = WILL_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") + @Configure(name = INSTANCE_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") + @Configure(name = TIME_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") + @Specification({ + "${mqtt}/session.will.message.normal.disconnect/client", + "${kafka}/session.will.message.normal.disconnect/server"}) + public void shouldNotSendWillMessageOnNormalDisconnect() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") + @Configure(name = LIFETIME_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") + @Configure(name = WILL_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") + @Configure(name = INSTANCE_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") + @Configure(name = TIME_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") + @Specification({ + "${mqtt}/session.will.message.clean.start/client", + 
"${kafka}/session.will.message.clean.start/server"}) + public void shouldGenerateLifeTimeIdOnCleanStart() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") + @Configure(name = LIFETIME_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") + @Configure(name = WILL_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") + @Configure(name = INSTANCE_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") + @Configure(name = TIME_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") + @Specification({ + "${mqtt}/session.will.message.abort.deliver.will/client", + "${kafka}/session.will.message.abort.deliver.will/server"}) + public void shouldSendWillMessageOnAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") + @Configure(name = LIFETIME_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") + @Configure(name = WILL_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") + @Configure(name = INSTANCE_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") + @Configure(name = TIME_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") + @Specification({ + 
"${mqtt}/session.will.message.abort.deliver.will/client", + "${kafka}/session.will.message.will.id.mismatch.no.deliver/server"}) + public void shouldNotSendWillMessageOnWillIdMismatch() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") + @Configure(name = LIFETIME_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") + @Configure(name = WILL_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") + @Configure(name = INSTANCE_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") + @Configure(name = TIME_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") + @Specification({ + "${mqtt}/session.will.message.abort.deliver.will.retain/client", + "${kafka}/session.will.message.abort.deliver.will.retain/server"}) + public void shouldSaveWillMessageAsRetain() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") + @Configure(name = LIFETIME_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") + @Configure(name = WILL_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") + @Configure(name = INSTANCE_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") + @Configure(name = TIME_NAME, + value = 
"io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") + @Specification({ + "${mqtt}/session.will.message.client.takeover.deliver.will/client", + "${kafka}/session.will.message.takeover.deliver.will/server"}) + public void shouldSendWillMessageOnAbortClientTakeover() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") + @Configure(name = LIFETIME_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") + @Configure(name = WILL_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") + @Configure(name = INSTANCE_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") + @Configure(name = TIME_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") + @Specification({ + "${kafka}/session.will.message.cancel.delivery/server"}) + public void shouldCancelWillDelivery() throws Exception + { + k3po.start(); + Thread.sleep(1000); + k3po.notifyBarrier("WAIT_1_SECOND"); + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Configure(name = WILL_STREAM_RECONNECT_DELAY_NAME, value = "1") + @Specification({ + "${kafka}/session.will.stream.end.reconnect/server"}) + public void shouldReconnectWillStreamOnKafkaEnd() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Configure(name = WILL_STREAM_RECONNECT_DELAY_NAME, value = "1") + @Specification({ + "${kafka}/session.will.stream.abort.reconnect/server"}) + public void shouldReconnectWillStreamOnKafkaAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Configure(name = 
WILL_STREAM_RECONNECT_DELAY_NAME, value = "1") + @Specification({ + "${kafka}/session.will.stream.reset.reconnect/server"}) + public void shouldReconnectWillStreamOnKafkaReset() throws Exception + { + k3po.finish(); + } + + public static String supplySessionId() + { + return "sender-1"; + } + + public static String supplyWillId() + { + return "d252a6bd-abb5-446a-b0f7-d0a3d8c012e2"; + } + + public static String supplyLifetimeId() + { + return "1e6a1eb5-810a-459d-a12c-a6fa08f228d1"; + } + + public static String supplyInstanceId() + { + return "zilla-1"; + } + + public static long supplyTime() + { + return 1000L; + } } diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java index 9e5cb14695..93ad269c1e 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java @@ -14,6 +14,7 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.WILL_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_BUFFER_SLOT_CAPACITY; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static java.util.concurrent.TimeUnit.SECONDS; @@ -29,6 +30,7 @@ import io.aklivity.zilla.runtime.engine.test.EngineRule; import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; +import io.aklivity.zilla.runtime.engine.test.annotation.Configure; public class MqttKafkaSubscribeProxyIT { @@ -54,6 +56,7 @@ public class MqttKafkaSubscribeProxyIT @Test 
@Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.client.sent.abort/client", "${kafka}/subscribe.client.sent.abort/server"}) @@ -64,6 +67,7 @@ public void shouldReceiveClientSentAbort() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.client.sent.data/client", "${kafka}/subscribe.client.sent.data/server"}) @@ -74,6 +78,7 @@ public void shouldAbortWhenClientSentData() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.client.sent.reset/client", "${kafka}/subscribe.client.sent.reset/server"}) @@ -84,6 +89,7 @@ public void shouldReceiveClientSentReset() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.server.sent.abort/client", "${kafka}/subscribe.server.sent.abort/server"}) @@ -94,6 +100,7 @@ public void shouldReceiveServerSentAbort() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.server.sent.flush/client", "${kafka}/subscribe.server.sent.flush/server"}) @@ -104,6 +111,7 @@ public void shouldReceiveServerSentFlush() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.server.sent.reset/client", "${kafka}/subscribe.server.sent.reset/server"}) @@ -114,6 +122,7 @@ public void shouldReceiveServerSentReset() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.retained.server.sent.abort/client", "${kafka}/subscribe.retained.server.sent.abort/server"}) @@ -124,6 +133,7 @@ public void shouldReceiveServerSentRetainedAbort() throws Exception 
@Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.retained.server.sent.reset/client", "${kafka}/subscribe.retained.server.sent.reset/server"}) @@ -134,6 +144,7 @@ public void shouldReceiveServerSentRetainedReset() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.one.message/client", "${kafka}/subscribe.one.message/server"}) @@ -144,6 +155,7 @@ public void shouldReceiveOneMessage() throws Exception @Test @Configuration("proxy.options.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.one.message/client", "${kafka}/subscribe.one.message.changed.topic.name/server"}) @@ -154,6 +166,7 @@ public void shouldReceiveOneMessageWithChangedTopicName() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.multiple.message/client", "${kafka}/subscribe.multiple.message/server"}) @@ -164,6 +177,7 @@ public void shouldReceiveMultipleMessage() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.retain.as.published/client", "${kafka}/subscribe.retain/server"}) @@ -174,6 +188,7 @@ public void shouldReceiveRetainAsPublished() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.retain/client", "${kafka}/subscribe.retain/server"}) @@ -184,6 +199,7 @@ public void shouldReceiveRetainedNoRetainAsPublished() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.filter.change.retain/client", "${kafka}/subscribe.filter.change.retain/server"}) @@ -194,6 +210,7 @@ public void 
shouldReceiveRetainedAfterFilterChange() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.filter.change.retain/client", "${kafka}/subscribe.filter.change.retain.buffer/server"}) @@ -204,6 +221,7 @@ public void shouldReceiveRetainedAfterFilterChangeBufferMessages() throws Except @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.deferred.filter.change.retain/client", "${kafka}/subscribe.deferred.filter.change.retain/server"}) @@ -214,6 +232,7 @@ public void shouldReceiveRetainedAfterDeferredFilterChange() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.filter.change.retain.resubscribe/client", "${kafka}/subscribe.filter.change.retain.resubscribe/server"}) @@ -224,6 +243,7 @@ public void shouldReceiveRetainedAfterResubscribe() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.one.message.receive.response.topic.and.correlation.data/client", "${kafka}/subscribe.one.message.receive.response.topic.and.correlation.data/server"}) @@ -234,6 +254,7 @@ public void shouldReceiveCorrelationData() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.one.message.user.properties.unaltered/client", "${kafka}/subscribe.one.message.user.properties.unaltered/server"}) @@ -244,6 +265,7 @@ public void shouldReceiveOneMessageWithUserPropertiesUnaltered() throws Exceptio @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.publish.no.local/client", "${kafka}/subscribe.publish.no.local/server"}) @@ -254,6 +276,7 @@ public void shouldNotReceiveLocal() throws 
Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.receive.message.overlapping.wildcard/client", "${kafka}/subscribe.receive.message.overlapping.wildcard/server"}) @@ -264,6 +287,7 @@ public void shouldReceiveMessageOverlappingWildcard() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.receive.message.wildcard/client", "${kafka}/subscribe.receive.message.wildcard/server"}) @@ -274,6 +298,7 @@ public void shouldReceiveOneMessageWithPatternTopic() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.topic.filter.multi.level.wildcard/client", "${kafka}/subscribe.topic.filter.multi.level.wildcard/server"}) @@ -284,6 +309,7 @@ public void shouldFilterMultiLevelWildcard() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.topic.filter.single.and.multi.level.wildcard/client", "${kafka}/subscribe.topic.filter.single.and.multi.level.wildcard/server"}) @@ -294,6 +320,7 @@ public void shouldFilterSingleAndMultiLevelWildcard() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.topic.filter.single.level.wildcard/client", "${kafka}/subscribe.topic.filter.single.level.wildcard/server"}) @@ -304,6 +331,7 @@ public void shouldFilterSingleLevelWildcard() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.topic.filter.two.single.level.wildcard/client", "${kafka}/subscribe.topic.filter.two.single.level.wildcard/server"}) @@ -314,6 +342,7 @@ public void shouldFilterTwoSingleLevelWildcard() throws Exception @Test 
@Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.topic.filters.aggregated.both.exact/client", "${kafka}/subscribe.topic.filters.aggregated.both.exact/server"}) @@ -324,6 +353,7 @@ public void shouldFilterAggregatedBothExact() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.topic.filters.aggregated.exact.and.wildcard/client", "${kafka}/subscribe.topic.filters.aggregated.exact.and.wildcard/server"}) @@ -334,6 +364,7 @@ public void shouldFilterAggregatedExactAndWildcard() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.topic.filters.isolated.both.exact/client", "${kafka}/subscribe.topic.filters.isolated.both.exact/server"}) @@ -344,6 +375,7 @@ public void shouldFilterIsolatedBothExact() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.topic.filters.isolated.exact.and.wildcard/client", "${kafka}/subscribe.topic.filters.isolated.exact.and.wildcard/server"}) @@ -354,6 +386,7 @@ public void shouldFilterIsolatedExactAndWildcard() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/subscribe.topic.filters.overlapping.wildcards/client", "${kafka}/subscribe.topic.filters.overlapping.wildcards/server"}) @@ -364,6 +397,7 @@ public void shouldFilterOverlappingWildcard() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/unsubscribe.after.subscribe/client", "${kafka}/unsubscribe.after.subscribe/server"}) @@ -374,6 +408,7 @@ public void shouldAcknowledge() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = 
"false") @Specification({ "${mqtt}/unsubscribe.topic.filter.single/client", "${kafka}/unsubscribe.topic.filter.single/server"}) diff --git a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java index b1ef279da1..6cca025d73 100644 --- a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java +++ b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java @@ -15,6 +15,7 @@ */ package io.aklivity.zilla.specs.binding.mqtt.internal; +import static java.lang.System.currentTimeMillis; import static java.nio.charset.StandardCharsets.UTF_8; import java.nio.ByteBuffer; @@ -31,7 +32,6 @@ import io.aklivity.zilla.specs.binding.mqtt.internal.types.Array32FW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttBinaryFW; -import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttMessageFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttPayloadFormat; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttPayloadFormatFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttPublishFlags; @@ -41,6 +41,8 @@ import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSubscribeFlags; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttTopicFilterFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttUserPropertyFW; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttWillMessageFW; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttWillSignalFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.String16FW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.Varuint32FW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttBeginExFW; @@ -119,6 +121,18 @@ public static MqttWillMessageBuilder will() 
return new MqttWillMessageBuilder(); } + @Function + public static MqttWillSignalBuilder willSignal() + { + return new MqttWillSignalBuilder(); + } + + @Function + public static long timestamp() + { + return currentTimeMillis(); + } + @Function public static byte[] randomBytes( int length) @@ -739,7 +753,8 @@ public byte[] build() public static final class MqttWillMessageBuilder { - private final MqttMessageFW.Builder willMessageRW = new MqttMessageFW.Builder(); + private final MqttWillMessageFW.Builder willMessageRW = new MqttWillMessageFW.Builder(); + private final MqttWillMessageFW willMessageRO = new MqttWillMessageFW(); private MqttWillMessageBuilder() { @@ -806,6 +821,20 @@ public MqttWillMessageBuilder responseTopic( return this; } + public MqttWillMessageBuilder lifetimeId( + String lifetimeId) + { + willMessageRW.lifetimeId(lifetimeId); + return this; + } + + public MqttWillMessageBuilder willId( + String willId) + { + willMessageRW.willId(willId); + return this; + } + public MqttWillMessageBuilder correlation( String correlation) { @@ -844,13 +873,74 @@ public MqttWillMessageBuilder payloadBytes( public byte[] build() { - final MqttMessageFW willMessage = willMessageRW.build(); + final MqttWillMessageFW willMessage = willMessageRW.build(); final byte[] array = new byte[willMessage.sizeof()]; willMessage.buffer().getBytes(willMessage.offset(), array); return array; } } + public static final class MqttWillSignalBuilder + { + private final MqttWillSignalFW.Builder willSignalRW = new MqttWillSignalFW.Builder(); + + private MqttWillSignalBuilder() + { + MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); + willSignalRW.wrap(writeBuffer, 0, writeBuffer.capacity()); + } + + public MqttWillSignalBuilder clientId( + String clientId) + { + willSignalRW.clientId(clientId); + return this; + } + + public MqttWillSignalBuilder delay( + int delay) + { + willSignalRW.delay(delay); + return this; + } + + public MqttWillSignalBuilder deliverAt( + long 
deliverAt) + { + willSignalRW.deliverAt(deliverAt); + return this; + } + + public MqttWillSignalBuilder lifetimeId( + String lifetimeId) + { + willSignalRW.lifetimeId(lifetimeId); + return this; + } + + public MqttWillSignalBuilder willId( + String willId) + { + willSignalRW.willId(willId); + return this; + } + + public MqttWillSignalBuilder instanceId( + String instanceId) + { + willSignalRW.instanceId(instanceId); + return this; + } + + public byte[] build() + { + final MqttWillSignalFW willSignal = willSignalRW.build(); + final byte[] array = new byte[willSignal.sizeof()]; + willSignal.buffer().getBytes(willSignal.offset(), array); + return array; + } + } + public static final class MqttBeginExMatcherBuilder { private final DirectBuffer bufferRO = new UnsafeBuffer(); diff --git a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl index 7265b549b3..b1d22471d1 100644 --- a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl +++ b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl @@ -54,6 +54,11 @@ scope mqtt TEXT } + enum MqttWillDeliverAt (int64) + { + UNKNOWN(-1L) + } + struct MqttBinary { int32 length; @@ -80,7 +85,17 @@ scope mqtt MqttTopicFilter[] subscriptions; } - struct MqttMessage + struct MqttWillSignal + { + string16 clientId; + int32 delay = 0; + int64 deliverAt = -1; + string16 lifetimeId = null; + string16 willId = null; + string16 instanceId; + } + + struct MqttWillMessage { string16 topic = null; int32 delay = 0; @@ -90,6 +105,8 @@ scope mqtt string16 contentType = null; MqttPayloadFormat format = BINARY; string16 responseTopic = null; + string16 lifetimeId = null; + string16 willId = null; MqttBinary correlation; MqttUserProperty[] properties; MqttBinary payload; diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java 
b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java index e6dd35db93..43707aa657 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java @@ -29,9 +29,10 @@ import org.junit.Test; import org.kaazing.k3po.lang.el.BytesMatcher; -import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttMessageFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttPayloadFormat; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSessionStateFW; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttWillMessageFW; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttWillSignalFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttDataExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttFlushExFW; @@ -1217,13 +1218,15 @@ public void shouldEncodeWillMessage() .contentType("message") .format("TEXT") .responseTopic("will.client.response") + .lifetimeId("1") + .willId("2") .correlation("request-id-1") .userProperty("name", "value") .payload("client failed") .build(); DirectBuffer buffer = new UnsafeBuffer(array); - MqttMessageFW willMessage = new MqttMessageFW().wrap(buffer, 0, buffer.capacity()); + MqttWillMessageFW willMessage = new MqttWillMessageFW().wrap(buffer, 0, buffer.capacity()); assertEquals("will.client", willMessage.topic().asString()); assertEquals(20, willMessage.delay()); @@ -1231,6 +1234,8 @@ public void shouldEncodeWillMessage() assertEquals("message", willMessage.contentType().asString()); assertEquals("TEXT", willMessage.format().toString()); assertEquals("will.client.response", willMessage.responseTopic().asString()); + assertEquals("1", 
willMessage.lifetimeId().asString()); + assertEquals("2", willMessage.willId().asString()); assertEquals("request-id-1", willMessage.correlation() .bytes().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))); assertNotNull(willMessage.properties() @@ -1254,7 +1259,7 @@ public void shouldEncodeWillMessageBytesPayload() .build(); DirectBuffer buffer = new UnsafeBuffer(array); - MqttMessageFW willMessage = new MqttMessageFW().wrap(buffer, 0, buffer.capacity()); + MqttWillMessageFW willMessage = new MqttWillMessageFW().wrap(buffer, 0, buffer.capacity()); assertEquals("will.client", willMessage.topic().asString()); assertEquals(1, willMessage.flags()); @@ -1266,4 +1271,49 @@ public void shouldEncodeWillMessageBytesPayload() assertArrayEquals(new byte[] {0, 1, 2, 3, 4, 5}, willMessage.payload() .bytes().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)).getBytes()); } + + @Test + public void shouldEncodeWillSignal() + { + final byte[] array = MqttFunctions.willSignal() + .clientId("client-1") + .delay(20) + .deliverAt(100000) + .lifetimeId("1") + .willId("2") + .instanceId("zilla-1") + .build(); + + DirectBuffer buffer = new UnsafeBuffer(array); + MqttWillSignalFW willSignal = new MqttWillSignalFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals("client-1", willSignal.clientId().asString()); + assertEquals(20, willSignal.delay()); + assertEquals(100000, willSignal.deliverAt()); + assertEquals("1", willSignal.lifetimeId().asString()); + assertEquals("2", willSignal.willId().asString()); + assertEquals("zilla-1", willSignal.instanceId().asString()); + } + + @Test + public void shouldEncodeWillSignalUnknownDeliverAt() + { + final byte[] array = MqttFunctions.willSignal() + .clientId("client-1") + .delay(20) + .lifetimeId("1") + .willId("2") + .instanceId("zilla-1") + .build(); + + DirectBuffer buffer = new UnsafeBuffer(array); + MqttWillSignalFW willSignal = new MqttWillSignalFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals("client-1", 
willSignal.clientId().asString()); + assertEquals(20, willSignal.delay()); + assertEquals(-1, willSignal.deliverAt()); + assertEquals("1", willSignal.lifetimeId().asString()); + assertEquals("2", willSignal.willId().asString()); + assertEquals("zilla-1", willSignal.instanceId().asString()); + } } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 5daa0866e1..4bbe869c59 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -112,11 +112,11 @@ import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttBinaryFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; -import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttMessageFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttPayloadFormat; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttQoS; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttSessionStateFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttTopicFilterFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttWillMessageFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Varuint32FW; @@ -246,7 +246,7 @@ public final class MqttServerFactory implements MqttStreamFactory private final MqttDataExFW.Builder mqttSubscribeDataExRW = new MqttDataExFW.Builder(); private final MqttDataExFW.Builder mqttSessionDataExRW = new 
MqttDataExFW.Builder(); private final MqttFlushExFW.Builder mqttFlushExRW = new MqttFlushExFW.Builder(); - private final MqttMessageFW.Builder mqttMessageFW = new MqttMessageFW.Builder(); + private final MqttWillMessageFW.Builder mqttWillMessageRW = new MqttWillMessageFW.Builder(); private final MqttSessionStateFW.Builder mqttSessionStateFW = new MqttSessionStateFW.Builder(); private final MqttPacketHeaderFW mqttPacketHeaderRO = new MqttPacketHeaderFW(); private final MqttConnectFW mqttConnectRO = new MqttConnectFW(); @@ -1773,8 +1773,8 @@ else if (this.authField.equals(MqttConnectProperty.PASSWORD)) .typeId(mqttTypeId) .session(s -> s.kind(k -> k.set(MqttSessionDataKind.WILL))); - final MqttMessageFW.Builder willMessageBuilder = - mqttMessageFW.wrap(willMessageBuffer, 0, willMessageBuffer.capacity()) + final MqttWillMessageFW.Builder willMessageBuilder = + mqttWillMessageRW.wrap(willMessageBuffer, 0, willMessageBuffer.capacity()) .topic(payload.willTopic) .delay(payload.willDelay) .qos(willQos) @@ -1790,7 +1790,7 @@ else if (this.authField.equals(MqttConnectProperty.PASSWORD)) c -> willMessageBuilder.propertiesItem(p -> p.key(c.key()).value(c.value()))); willMessageBuilder.payload(p -> p.bytes(payload.willPayload.bytes())); - final MqttMessageFW will = willMessageBuilder.build(); + final MqttWillMessageFW will = willMessageBuilder.build(); final int willPayloadSize = willMessageBuilder.sizeof(); if (!sessionStream.hasSessionWindow(willPayloadSize)) diff --git a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java index 72eadf4add..427a07998e 100644 --- a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java +++ b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java @@ -3356,6 +3356,7 @@ public final 
class KafkaMergedFlushExMatcherBuilder private Array32FW.Builder progressRW; private KafkaKeyFW.Builder keyRW; private KafkaOffsetFW.Builder partitionRW; + private KafkaCapabilities capabilities; private Array32FW.Builder filtersRW; @@ -3363,6 +3364,13 @@ private KafkaMergedFlushExMatcherBuilder() { } + public KafkaMergedFlushExMatcherBuilder capabilities( + String capabilities) + { + this.capabilities = KafkaCapabilities.valueOf(capabilities); + return this; + } + public KafkaMergedFlushExMatcherBuilder progress( int partitionId, long offset) @@ -3476,12 +3484,19 @@ private boolean match( KafkaFlushExFW flushEx) { final KafkaMergedFlushExFW mergedFlushEx = flushEx.merged(); - return matchProgress(mergedFlushEx) && + return matchCapabilities(mergedFlushEx) && + matchProgress(mergedFlushEx) && matchKey(mergedFlushEx) && matchPartition(mergedFlushEx) && matchFilters(mergedFlushEx); } + private boolean matchCapabilities( + final KafkaMergedFlushExFW mergedFlushEx) + { + return capabilities == null || capabilities.equals(mergedFlushEx.capabilities().get()); + } + private boolean matchProgress( final KafkaMergedFlushExFW mergedFlushEx) { @@ -4019,6 +4034,7 @@ public final class KafkaMergedBeginExMatcherBuilder { private KafkaCapabilities capabilities; private String16FW topic; + private String16FW groupId; private Array32FW.Builder partitionsRW; private KafkaIsolation isolation; private KafkaDeltaType deltaType; @@ -4045,6 +4061,13 @@ public KafkaMergedBeginExMatcherBuilder topic( return this; } + public KafkaMergedBeginExMatcherBuilder groupId( + String groupId) + { + this.groupId = new String16FW(groupId); + return this; + } + public KafkaMergedBeginExMatcherBuilder partition( int partitionId, long offset) @@ -4145,6 +4168,7 @@ private boolean match( final KafkaMergedBeginExFW mergedBeginEx = beginEx.merged(); return matchCapabilities(mergedBeginEx) && matchTopic(mergedBeginEx) && + matchGroupId(mergedBeginEx) && matchPartitions(mergedBeginEx) && 
matchFilters(mergedBeginEx) && matchIsolation(mergedBeginEx) && @@ -4165,6 +4189,12 @@ private boolean matchTopic( return topic == null || topic.equals(mergedBeginEx.topic()); } + private boolean matchGroupId( + final KafkaMergedBeginExFW mergedBeginEx) + { + return groupId == null || groupId.equals(mergedBeginEx.groupId()); + } + private boolean matchPartitions( final KafkaMergedBeginExFW mergedBeginEx) { diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java index 6be4ef9041..b65916d815 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java @@ -46,6 +46,7 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.Array32FW; import io.aklivity.zilla.specs.binding.kafka.internal.types.KafkaAckMode; +import io.aklivity.zilla.specs.binding.kafka.internal.types.KafkaCapabilities; import io.aklivity.zilla.specs.binding.kafka.internal.types.KafkaDeltaFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.KafkaDeltaType; import io.aklivity.zilla.specs.binding.kafka.internal.types.KafkaEvaluation; @@ -4303,6 +4304,7 @@ public void shouldMatchMergedFlushExtension() throws Exception .merged() .partition(1, 2) .progress(0, 1L) + .capabilities("FETCH_ONLY") .key("key") .build() .build(); @@ -4315,6 +4317,7 @@ public void shouldMatchMergedFlushExtension() throws Exception .progressItem(p -> p .partitionId(0) .partitionOffset(1L)) + .capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)) .key(k -> k.length(3).value(v -> v.set("key".getBytes(UTF_8))))) .build(); From eb9c8a33492bd9b353c82e0441e194d1e88047d0 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 31 Aug 2023 11:33:27 -0700 Subject: [PATCH 
070/115] Ignore old version of guicedee commons-collections --- .github/dependabot.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index f6887e2230..91ad9f0f88 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,6 +5,8 @@ updates: ignore: - dependency-name: "org.kaazing:k3po.*" versions: [ "4.x", "5.x" ] + - dependency-name: "com.guicedee.services:commons-collections4" + versions: [ "62" ] schedule: interval: daily - package-ecosystem: docker From d812e8d25599719223565b17d1211b289a9d2173 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 31 Aug 2023 11:37:27 -0700 Subject: [PATCH 071/115] Ignore old version of guicedee commons --- .github/dependabot.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 91ad9f0f88..77041210ee 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,7 +5,7 @@ updates: ignore: - dependency-name: "org.kaazing:k3po.*" versions: [ "4.x", "5.x" ] - - dependency-name: "com.guicedee.services:commons-collections4" + - dependency-name: "com.guicedee.services:commons-*" versions: [ "62" ] schedule: interval: daily From eed850901ab5dc8ba25c15dd40d84c9efdbc18f7 Mon Sep 17 00:00:00 2001 From: Attila Kreiner Date: Thu, 31 Aug 2023 20:59:59 +0200 Subject: [PATCH 072/115] Generate zilla.yaml from an AsyncAPI definition (#369) --- incubator/command-config/pom.xml | 4 + .../internal/airline/ConfigGenerator.java | 21 +- .../internal/airline/ZillaConfigCommand.java | 6 +- .../AsyncApiHttpProxyConfigGenerator.java | 504 ++++++++++++++++++ .../internal/asyncapi/model/AsyncApi.java | 25 + .../internal/asyncapi/model/Binding.java | 20 + .../internal/asyncapi/model/Channel.java | 25 + .../internal/asyncapi/model/Components.java | 25 + .../config/internal/asyncapi/model/Item.java | 20 + .../internal/asyncapi/model/Message.java | 20 + .../internal/asyncapi/model/Operation.java | 23 + 
.../internal/asyncapi/model/Schema.java | 22 + .../asyncapi/model/SecurityScheme.java | 20 + .../internal/asyncapi/model/Server.java | 24 + .../internal/asyncapi/view/ChannelView.java | 44 ++ .../internal/asyncapi/view/Resolvable.java | 47 ++ .../internal/asyncapi/view/ServerView.java | 58 ++ .../OpenApiHttpProxyConfigGenerator.java | 51 +- .../PathItem2.java => view/PathView.java} | 10 +- .../Server2.java => view/ServerView.java} | 10 +- .../src/main/moditect/module-info.java | 2 + .../AsyncApiHttpProxyConfigGeneratorTest.java | 95 ++++ .../http/proxy/complete/asyncapi.yaml | 98 ++++ .../asyncapi/http/proxy/complete/zilla.yaml | 129 +++++ .../asyncapi/http/proxy/jwt/asyncapi.yaml | 94 ++++ .../asyncapi/http/proxy/jwt/zilla.yaml | 70 +++ .../asyncapi/http/proxy/plain/asyncapi.yaml | 83 +++ .../asyncapi/http/proxy/plain/zilla.yaml | 43 ++ .../asyncapi/http/proxy/tls/asyncapi.yaml | 83 +++ .../asyncapi/http/proxy/tls/zilla.yaml | 83 +++ .../openapi/http/proxy/complete/zilla.yaml | 2 +- .../openapi/http/proxy/tls/zilla.yaml | 2 +- 32 files changed, 1717 insertions(+), 46 deletions(-) create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/AsyncApiHttpProxyConfigGenerator.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/AsyncApi.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Binding.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Channel.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Components.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Item.java create mode 100644 
incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Message.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Operation.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Schema.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/SecurityScheme.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Server.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/view/ChannelView.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/view/Resolvable.java create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/view/ServerView.java rename incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/{model2/PathItem2.java => view/PathView.java} (93%) rename incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/{model2/Server2.java => view/ServerView.java} (87%) create mode 100644 incubator/command-config/src/test/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/AsyncApiHttpProxyConfigGeneratorTest.java create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/complete/asyncapi.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/complete/zilla.yaml create mode 100644 
incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/jwt/asyncapi.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/jwt/zilla.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/plain/asyncapi.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/plain/zilla.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/tls/asyncapi.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/tls/zilla.yaml diff --git a/incubator/command-config/pom.xml b/incubator/command-config/pom.xml index abffc062e9..fa369b72cc 100644 --- a/incubator/command-config/pom.xml +++ b/incubator/command-config/pom.xml @@ -98,6 +98,7 @@ src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/**/* + src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/**/* @@ -127,6 +128,9 @@ io/aklivity/zilla/runtime/command/config/internal/types/**/*.class io/aklivity/zilla/runtime/command/config/internal/openapi/model/*.class + io/aklivity/zilla/runtime/command/config/internal/openapi/model2/*.class + io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/*.class + io/aklivity/zilla/runtime/command/config/internal/asyncapi/model2/*.class diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ConfigGenerator.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ConfigGenerator.java index 06462e5112..aa2f1ddc19 100644 --- 
a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ConfigGenerator.java +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ConfigGenerator.java @@ -14,7 +14,24 @@ */ package io.aklivity.zilla.runtime.command.config.internal.airline; -public interface ConfigGenerator +import java.util.List; +import java.util.regex.Pattern; + +public abstract class ConfigGenerator { - String generate(); + public abstract String generate(); + + protected final String unquoteEnvVars( + String yaml, + List unquotedEnvVars) + { + for (String envVar : unquotedEnvVars) + { + yaml = yaml.replaceAll( + Pattern.quote(String.format("\"${{env.%s}}\"", envVar)), + String.format("\\${{env.%s}}", envVar) + ); + } + return yaml; + } } diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java index c953e894ec..095acbd193 100644 --- a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java @@ -30,19 +30,21 @@ import com.github.rvesse.airline.annotations.restrictions.Required; import io.aklivity.zilla.runtime.command.ZillaCommand; +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.http.proxy.AsyncApiHttpProxyConfigGenerator; import io.aklivity.zilla.runtime.command.config.internal.openapi.http.proxy.OpenApiHttpProxyConfigGenerator; @Command(name = "config", description = "Generate configuration file") public final class ZillaConfigCommand extends ZillaCommand { private static final Map> GENERATORS = Map.of( - "openapi.http.proxy", OpenApiHttpProxyConfigGenerator::new + "openapi.http.proxy", 
OpenApiHttpProxyConfigGenerator::new, + "asyncapi.http.proxy", AsyncApiHttpProxyConfigGenerator::new ); @Option(name = {"-t", "--template"}, description = "Template name") @Required - @AllowedValues(allowedValues = {"openapi.http.proxy"}) + @AllowedValues(allowedValues = {"openapi.http.proxy", "asyncapi.http.proxy"}) public String template; @Option(name = {"-i", "--input"}, diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/AsyncApiHttpProxyConfigGenerator.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/AsyncApiHttpProxyConfigGenerator.java new file mode 100644 index 0000000000..0d0276ec12 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/AsyncApiHttpProxyConfigGenerator.java @@ -0,0 +1,504 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.http.proxy; + +import static io.aklivity.zilla.runtime.binding.http.config.HttpPolicyConfig.CROSS_ORIGIN; +import static io.aklivity.zilla.runtime.engine.config.KindConfig.CLIENT; +import static io.aklivity.zilla.runtime.engine.config.KindConfig.SERVER; +import static java.util.Objects.requireNonNull; +import static org.agrona.LangUtil.rethrowUnchecked; + +import java.io.InputStream; +import java.net.URI; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import jakarta.json.Json; +import jakarta.json.JsonPatch; +import jakarta.json.JsonPatchBuilder; +import jakarta.json.bind.Jsonb; +import jakarta.json.bind.JsonbBuilder; + +import io.aklivity.zilla.runtime.binding.http.config.HttpConditionConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpOptionsConfig; +import io.aklivity.zilla.runtime.binding.http.config.HttpOptionsConfigBuilder; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpConditionConfig; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfig; +import io.aklivity.zilla.runtime.binding.tls.config.TlsOptionsConfig; +import io.aklivity.zilla.runtime.command.config.internal.airline.ConfigGenerator; +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.model.AsyncApi; +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.model.Item; +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.model.Message; +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.model.Operation; +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.model.Server; +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.view.ChannelView; +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.view.ServerView; +import io.aklivity.zilla.runtime.engine.config.BindingConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.ConfigWriter; +import 
io.aklivity.zilla.runtime.engine.config.GuardedConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.NamespaceConfig; +import io.aklivity.zilla.runtime.engine.config.NamespaceConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.RouteConfigBuilder; +import io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; +import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfig; + +public class AsyncApiHttpProxyConfigGenerator extends ConfigGenerator +{ + private final InputStream input; + + private AsyncApi asyncApi; + private int[] allPorts; + private int[] httpPorts; + private int[] httpsPorts; + private boolean isPlainEnabled; + private boolean isTlsEnabled; + private Map securitySchemes; + private String authorizationHeader; + private boolean isJwtEnabled; + + public AsyncApiHttpProxyConfigGenerator( + InputStream input) + { + this.input = input; + } + + @Override + public String generate() + { + this.asyncApi = parseAsyncApi(input); + this.allPorts = resolveAllPorts(); + this.httpPorts = resolvePortsForScheme("http"); + this.httpsPorts = resolvePortsForScheme("https"); + this.isPlainEnabled = httpPorts != null; + this.isTlsEnabled = httpsPorts != null; + this.securitySchemes = resolveSecuritySchemes(); + this.authorizationHeader = resolveAuthorizationHeader(); + this.isJwtEnabled = !securitySchemes.isEmpty(); + ConfigWriter configWriter = new ConfigWriter(null); + String yaml = configWriter.write(createNamespace(), createEnvVarsPatch()); + return unquoteEnvVars(yaml, unquotedEnvVars()); + } + + private AsyncApi parseAsyncApi( + InputStream inputStream) + { + AsyncApi asyncApi = null; + try (Jsonb jsonb = JsonbBuilder.create()) + { + asyncApi = jsonb.fromJson(inputStream, AsyncApi.class); + } + catch (Exception ex) + { + rethrowUnchecked(ex); + } + return asyncApi; + } + + private int[] resolveAllPorts() + { + int[] ports = new int[asyncApi.servers.size()]; + String[] keys = asyncApi.servers.keySet().toArray(String[]::new); 
+ for (int i = 0; i < asyncApi.servers.size(); i++) + { + ServerView server = ServerView.of(asyncApi.servers.get(keys[i])); + URI url = server.url(); + ports[i] = url.getPort(); + } + return ports; + } + + private int[] resolvePortsForScheme( + String scheme) + { + requireNonNull(scheme); + int[] ports = null; + URI url = findFirstServerUrlWithScheme(scheme); + if (url != null) + { + ports = new int[] {url.getPort()}; + } + return ports; + } + + private URI findFirstServerUrlWithScheme( + String scheme) + { + requireNonNull(scheme); + URI result = null; + for (String key : asyncApi.servers.keySet()) + { + ServerView server = ServerView.of(asyncApi.servers.get(key)); + if (scheme.equals(server.url().getScheme())) + { + result = server.url(); + break; + } + } + return result; + } + + private Map resolveSecuritySchemes() + { + requireNonNull(asyncApi); + Map result = new HashMap<>(); + if (asyncApi.components != null && asyncApi.components.securitySchemes != null) + { + for (String securitySchemeName : asyncApi.components.securitySchemes.keySet()) + { + String guardType = asyncApi.components.securitySchemes.get(securitySchemeName).bearerFormat; + if ("jwt".equals(guardType)) + { + result.put(securitySchemeName, guardType); + } + } + } + return result; + } + + private String resolveAuthorizationHeader() + { + requireNonNull(asyncApi); + requireNonNull(asyncApi.components); + requireNonNull(asyncApi.components.messages); + String result = null; + for (Map.Entry entry: asyncApi.components.messages.entrySet()) + { + Message message = entry.getValue(); + if (message.headers != null && message.headers.properties != null) + { + Item authorization = message.headers.properties.get("authorization"); + if (authorization != null) + { + result = authorization.description; + break; + } + } + } + return result; + } + + private NamespaceConfig createNamespace() + { + return NamespaceConfig.builder() + .name("example") + .binding() + .name("tcp_server0") + .type("tcp") + .kind(SERVER) 
+ .options(TcpOptionsConfig::builder) + .host("0.0.0.0") + .ports(allPorts) + .build() + .inject(this::injectPlainTcpRoute) + .inject(this::injectTlsTcpRoute) + .build() + .inject(this::injectTlsServer) + .binding() + .name("http_server0") + .type("http") + .kind(SERVER) + .options(HttpOptionsConfig::builder) + .access() + .policy(CROSS_ORIGIN) + .build() + .inject(this::injectHttpServerOptions) + .build() + .inject(this::injectHttpServerRoutes) + .build() + .binding() + .name("http_client0") + .type("http") + .kind(CLIENT) + .exit(isTlsEnabled ? "tls_client0" : "tcp_client0") + .build() + .inject(this::injectTlsClient) + .binding() + .name("tcp_client0") + .type("tcp") + .kind(CLIENT) + .options(TcpOptionsConfig::builder) + .host("") // env + .ports(new int[]{0}) // env + .build() + .build() + .inject(this::injectGuard) + .inject(this::injectVaults) + .build(); + } + + private BindingConfigBuilder> injectPlainTcpRoute( + BindingConfigBuilder> binding) + { + if (isPlainEnabled) + { + binding + .route() + .when(TcpConditionConfig::builder) + .ports(httpPorts) + .build() + .exit("http_server0") + .build(); + } + return binding; + } + + private BindingConfigBuilder> injectTlsTcpRoute( + BindingConfigBuilder> binding) + { + if (isTlsEnabled) + { + binding + .route() + .when(TcpConditionConfig::builder) + .ports(httpsPorts) + .build() + .exit("tls_server0") + .build(); + } + return binding; + } + + private NamespaceConfigBuilder injectTlsServer( + NamespaceConfigBuilder namespace) + { + if (isTlsEnabled) + { + namespace + .binding() + .name("tls_server0") + .type("tls") + .kind(SERVER) + .options(TlsOptionsConfig::builder) + .keys(List.of("")) // env + .sni(List.of("")) // env + .alpn(List.of("")) // env + .build() + .vault("server") + .exit("http_server0") + .build(); + } + return namespace; + } + + private HttpOptionsConfigBuilder>> injectHttpServerOptions( + HttpOptionsConfigBuilder>> options) + { + if (isJwtEnabled) + { + options + .authorization() + .name("jwt0") + 
.credentials() + .header() + .name("authorization") + .pattern(authorizationHeader) + .build() + .build() + .build(); + } + return options; + } + + private BindingConfigBuilder> injectHttpServerRoutes( + BindingConfigBuilder> binding) + { + for (Map.Entry entry : asyncApi.servers.entrySet()) + { + ServerView server = ServerView.of(entry.getValue()); + for (String name : asyncApi.operations.keySet()) + { + Operation operation = asyncApi.operations.get(name); + ChannelView channel = ChannelView.of(asyncApi.channels, operation.channel); + String path = channel.address().replaceAll("\\{[^}]+\\}", "*"); + String method = operation.bindings.get("http").method; + binding + .route() + .exit("http_client0") + .when(HttpConditionConfig::builder) + .header(":scheme", server.scheme()) + .header(":authority", server.authority()) + .header(":path", path) + .header(":method", method) + .build() + .inject(route -> injectHttpServerRouteGuarded(route, server)) + .build(); + } + } + return binding; + } + + private RouteConfigBuilder injectHttpServerRouteGuarded( + RouteConfigBuilder route, + ServerView server) + { + if (server.security() != null) + { + for (Map> securityItem : server.security()) + { + for (String securityItemLabel : securityItem.keySet()) + { + if (isJwtEnabled && "jwt".equals(securitySchemes.get(securityItemLabel))) + { + route + .guarded() + .name("jwt0") + .inject(guarded -> injectGuardedRoles(guarded, securityItem.get(securityItemLabel))) + .build(); + break; + } + } + } + } + return route; + } + + private GuardedConfigBuilder injectGuardedRoles( + GuardedConfigBuilder guarded, + List roles) + { + for (String role : roles) + { + guarded.role(role); + } + return guarded; + } + + private NamespaceConfigBuilder injectTlsClient( + NamespaceConfigBuilder namespace) + { + if (isTlsEnabled) + { + namespace + .binding() + .name("tls_client0") + .type("tls") + .kind(CLIENT) + .options(TlsOptionsConfig::builder) + .trust(List.of("")) // env + .sni(List.of("")) // env + 
.alpn(List.of("")) // env + .trustcacerts(true) + .build() + .vault("client") + .exit("tcp_client0") + .build(); + } + return namespace; + } + + private NamespaceConfigBuilder injectGuard( + NamespaceConfigBuilder namespace) + { + if (isJwtEnabled) + { + namespace + .guard() + .name("jwt0") + .type("jwt") + .options(JwtOptionsConfig::builder) + .issuer("") // env + .audience("") // env + .key() + .alg("").kty("").kid("").use("").n("").e("").crv("").x("").y("") // env + .build() + .build() + .build(); + } + return namespace; + } + + private NamespaceConfigBuilder injectVaults( + NamespaceConfigBuilder namespace) + { + if (isTlsEnabled) + { + namespace + .vault() + .name("client") + .type("filesystem") + .options(FileSystemOptionsConfig::builder) + .trust() + .store("") // env + .type("") // env + .password("") // env + .build() + .build() + .build() + .vault() + .name("server") + .type("filesystem") + .options(FileSystemOptionsConfig::builder) + .keys() + .store("") // env + .type("") // env + .password("") //env + .build() + .build() + .build(); + } + return namespace; + } + + private JsonPatch createEnvVarsPatch() + { + JsonPatchBuilder patch = Json.createPatchBuilder(); + patch.replace("/bindings/tcp_client0/options/host", "${{env.TCP_CLIENT_HOST}}"); + patch.replace("/bindings/tcp_client0/options/port", "${{env.TCP_CLIENT_PORT}}"); + + if (isJwtEnabled) + { + // jwt0 guard + patch.replace("/guards/jwt0/options/issuer", "${{env.JWT_ISSUER}}"); + patch.replace("/guards/jwt0/options/audience", "${{env.JWT_AUDIENCE}}"); + patch.replace("/guards/jwt0/options/keys/0/alg", "${{env.JWT_ALG}}"); + patch.replace("/guards/jwt0/options/keys/0/kty", "${{env.JWT_KTY}}"); + patch.replace("/guards/jwt0/options/keys/0/kid", "${{env.JWT_KID}}"); + patch.replace("/guards/jwt0/options/keys/0/use", "${{env.JWT_USE}}"); + patch.replace("/guards/jwt0/options/keys/0/n", "${{env.JWT_N}}"); + patch.replace("/guards/jwt0/options/keys/0/e", "${{env.JWT_E}}"); + 
patch.replace("/guards/jwt0/options/keys/0/crv", "${{env.JWT_CRV}}"); + patch.replace("/guards/jwt0/options/keys/0/x", "${{env.JWT_X}}"); + patch.replace("/guards/jwt0/options/keys/0/y", "${{env.JWT_Y}}"); + } + + if (isTlsEnabled) + { + // tls_server0 binding + patch.replace("/bindings/tls_server0/options/keys/0", "${{env.TLS_SERVER_KEY}}"); + patch.replace("/bindings/tls_server0/options/sni/0", "${{env.TLS_SERVER_SNI}}"); + patch.replace("/bindings/tls_server0/options/alpn/0", "${{env.TLS_SERVER_ALPN}}"); + // tls_client0 binding + patch.replace("/bindings/tls_client0/options/trust/0", "${{env.TLS_CLIENT_TRUST}}"); + patch.replace("/bindings/tls_client0/options/sni/0", "${{env.TLS_CLIENT_SNI}}"); + patch.replace("/bindings/tls_client0/options/alpn/0", "${{env.TLS_CLIENT_ALPN}}"); + // client vault + patch.replace("/vaults/client/options/trust/store", "${{env.TRUSTSTORE_PATH}}"); + patch.replace("/vaults/client/options/trust/type", "${{env.TRUSTSTORE_TYPE}}"); + patch.replace("/vaults/client/options/trust/password", "${{env.TRUSTSTORE_PASSWORD}}"); + // server vault + patch.replace("/vaults/server/options/keys/store", "${{env.KEYSTORE_PATH}}"); + patch.replace("/vaults/server/options/keys/type", "${{env.KEYSTORE_TYPE}}"); + patch.replace("/vaults/server/options/keys/password", "${{env.KEYSTORE_PASSWORD}}"); + } + + return patch.build(); + } + + private List unquotedEnvVars() + { + return List.of("TCP_CLIENT_PORT"); + } +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/AsyncApi.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/AsyncApi.java new file mode 100644 index 0000000000..512addcdea --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/AsyncApi.java @@ -0,0 +1,25 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License 
(the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.model; + +import java.util.Map; + +public class AsyncApi +{ + public Map servers; + public Map channels; + public Map operations; + public Components components; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Binding.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Binding.java new file mode 100644 index 0000000000..b008294c42 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Binding.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.model; + +public class Binding +{ + public String method; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Channel.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Channel.java new file mode 100644 index 0000000000..990dadf0fc --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Channel.java @@ -0,0 +1,25 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.model; + +import jakarta.json.bind.annotation.JsonbProperty; + +public class Channel +{ + public String address; + + @JsonbProperty("$ref") + public String ref; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Components.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Components.java new file mode 100644 index 0000000000..c138c91af3 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Components.java @@ -0,0 +1,25 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.model; + +import java.util.Map; + +import io.aklivity.zilla.runtime.command.config.internal.openapi.model.SecurityScheme; + +public class Components +{ + public Map securitySchemes; + public Map messages; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Item.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Item.java new file mode 100644 index 0000000000..05ad9b5d35 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Item.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.model; + +public class Item +{ + public String description; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Message.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Message.java new file mode 100644 index 0000000000..c73553c3b6 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Message.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.model; + +public class Message +{ + public Schema headers; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Operation.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Operation.java new file mode 100644 index 0000000000..8bf99ce810 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Operation.java @@ -0,0 +1,23 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.model; + +import java.util.Map; + +public class Operation +{ + public Map bindings; + public Channel channel; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Schema.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Schema.java new file mode 100644 index 0000000000..e05ef9ab7d --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Schema.java @@ -0,0 +1,22 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.model; + +import java.util.Map; + +public class Schema +{ + public Map properties; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/SecurityScheme.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/SecurityScheme.java new file mode 100644 index 0000000000..c57c946e63 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/SecurityScheme.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.model; + +public class SecurityScheme +{ + public String bearerFormat; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Server.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Server.java new file mode 100644 index 0000000000..eed9d65dc4 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/model/Server.java @@ -0,0 +1,24 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.model; + +import java.util.List; +import java.util.Map; + +public class Server +{ + public String host; + public List>> security; +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/view/ChannelView.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/view/ChannelView.java new file mode 100644 index 0000000000..a9b4832994 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/view/ChannelView.java @@ -0,0 +1,44 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.view; + +import java.util.Map; + +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.model.Channel; + +public final class ChannelView extends Resolvable +{ + private final Channel channel; + + private ChannelView( + Map channels, + Channel channel) + { + super(channels, "#/channels/(\\w+)"); + this.channel = channel.ref == null ? 
channel : resolveRef(channel.ref); + } + + public String address() + { + return channel.address; + } + + public static ChannelView of( + Map channels, + Channel channel) + { + return new ChannelView(channels, channel); + } +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/view/Resolvable.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/view/Resolvable.java new file mode 100644 index 0000000000..101dcfcb13 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/view/Resolvable.java @@ -0,0 +1,47 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.view; + +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public abstract class Resolvable +{ + private final Map map; + private final String regex; + + public Resolvable( + Map map, + String regex) + { + this.map = map; + this.regex = regex; + } + + protected T resolveRef( + String ref) + { + T result = null; + Pattern pattern = Pattern.compile(regex); + Matcher matcher = pattern.matcher(ref); + if (matcher.matches()) + { + String key = matcher.group(1); + result = map.get(key); + } + return result; + } +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/view/ServerView.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/view/ServerView.java new file mode 100644 index 0000000000..b89742dc5f --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/view/ServerView.java @@ -0,0 +1,58 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.view; + +import java.net.URI; +import java.util.List; +import java.util.Map; + +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.model.Server; + +public final class ServerView +{ + private final Server server; + + private ServerView( + Server server) + { + this.server = server; + } + + public URI url() + { + return URI.create(server.host); + } + + public List>> security() + { + return server.security; + } + + public String scheme() + { + return url().getScheme(); + } + + public String authority() + { + return String.format("%s:%d", url().getHost(), url().getPort()); + } + + public static ServerView of( + Server server) + { + return new ServerView(server); + } +} diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/OpenApiHttpProxyConfigGenerator.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/OpenApiHttpProxyConfigGenerator.java index 352820f39a..a090a1ccaa 100644 --- a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/OpenApiHttpProxyConfigGenerator.java +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/OpenApiHttpProxyConfigGenerator.java @@ -25,7 +25,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.regex.Pattern; import jakarta.json.Json; import jakarta.json.JsonPatch; @@ -42,8 +41,8 @@ import io.aklivity.zilla.runtime.command.config.internal.airline.ConfigGenerator; import io.aklivity.zilla.runtime.command.config.internal.openapi.model.OpenApi; import io.aklivity.zilla.runtime.command.config.internal.openapi.model.Server; -import io.aklivity.zilla.runtime.command.config.internal.openapi.model2.PathItem2; -import 
io.aklivity.zilla.runtime.command.config.internal.openapi.model2.Server2; +import io.aklivity.zilla.runtime.command.config.internal.openapi.view.PathView; +import io.aklivity.zilla.runtime.command.config.internal.openapi.view.ServerView; import io.aklivity.zilla.runtime.engine.config.BindingConfigBuilder; import io.aklivity.zilla.runtime.engine.config.ConfigWriter; import io.aklivity.zilla.runtime.engine.config.GuardedConfigBuilder; @@ -53,7 +52,7 @@ import io.aklivity.zilla.runtime.guard.jwt.config.JwtOptionsConfig; import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfig; -public class OpenApiHttpProxyConfigGenerator implements ConfigGenerator +public class OpenApiHttpProxyConfigGenerator extends ConfigGenerator { private final InputStream inputStream; @@ -72,6 +71,7 @@ public OpenApiHttpProxyConfigGenerator( this.inputStream = inputStream; } + @Override public String generate() { this.openApi = parseOpenApi(inputStream); @@ -84,7 +84,7 @@ public String generate() this.isJwtEnabled = !securitySchemes.isEmpty(); ConfigWriter configWriter = new ConfigWriter(null); String yaml = configWriter.write(createNamespace(), createEnvVarsPatch()); - return unquoteEnvVars(yaml); + return unquoteEnvVars(yaml, unquotedEnvVars()); } private OpenApi parseOpenApi( @@ -107,8 +107,8 @@ private int[] resolveAllPorts() int[] ports = new int[openApi.servers.size()]; for (int i = 0; i < openApi.servers.size(); i++) { - Server2 server2 = Server2.of(openApi.servers.get(i)); - URI url = server2.url(); + ServerView server = ServerView.of(openApi.servers.get(i)); + URI url = server.url(); ports[i] = url.getPort(); } return ports; @@ -132,12 +132,12 @@ private URI findFirstServerUrlWithScheme( { requireNonNull(scheme); URI result = null; - for (Server server : openApi.servers) + for (Server item : openApi.servers) { - Server2 server2 = Server2.of(server); - if (scheme.equals(server2.url().getScheme())) + ServerView server = ServerView.of(item); + if 
(scheme.equals(server.url().getScheme())) { - result = server2.url(); + result = server.url(); break; } } @@ -287,19 +287,19 @@ private HttpOptionsConfigBuilder> injectHttpServerRoutes( BindingConfigBuilder> binding) { - for (String path : openApi.paths.keySet()) + for (String item : openApi.paths.keySet()) { - PathItem2 item = PathItem2.of(openApi.paths.get(path)); - for (String method : item.methods().keySet()) + PathView path = PathView.of(openApi.paths.get(item)); + for (String method : path.methods().keySet()) { binding .route() .exit("http_client0") .when(HttpConditionConfig::builder) - .header(":path", path.replaceAll("\\{[^}]+\\}", "*")) + .header(":path", item.replaceAll("\\{[^}]+\\}", "*")) .header(":method", method) .build() - .inject(route -> injectHttpServerRouteGuarded(route, item, method)) + .inject(route -> injectHttpServerRouteGuarded(route, path, method)) .build(); } } @@ -308,10 +308,10 @@ private BindingConfigBuilder> injectHttp private RouteConfigBuilder>> injectHttpServerRouteGuarded( RouteConfigBuilder>> route, - PathItem2 item, + PathView path, String method) { - List>> security = item.methods().get(method).security; + List>> security = path.methods().get(method).security; if (security != null) { for (Map> securityItem : security) @@ -444,7 +444,7 @@ private JsonPatch createEnvVarsPatch() if (isTlsEnabled) { // tls_server0 binding - patch.replace("/bindings/tls_server0/options/keys/0", "${{env.TLS_SERVER_KEYS}}"); + patch.replace("/bindings/tls_server0/options/keys/0", "${{env.TLS_SERVER_KEY}}"); patch.replace("/bindings/tls_server0/options/sni/0", "${{env.TLS_SERVER_SNI}}"); patch.replace("/bindings/tls_server0/options/alpn/0", "${{env.TLS_SERVER_ALPN}}"); // tls_client0 binding @@ -464,17 +464,8 @@ private JsonPatch createEnvVarsPatch() return patch.build(); } - private String unquoteEnvVars( - String yaml) + private List unquotedEnvVars() { - List unquotedEnvVars = List.of("TCP_CLIENT_PORT"); - for (String envVar : unquotedEnvVars) - { - 
yaml = yaml.replaceAll( - Pattern.quote(String.format("\"${{env.%s}}\"", envVar)), - String.format("\\${{env.%s}}", envVar) - ); - } - return yaml; + return List.of("TCP_CLIENT_PORT"); } } diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/PathItem2.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/view/PathView.java similarity index 93% rename from incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/PathItem2.java rename to incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/view/PathView.java index 504ab65f23..f286508307 100644 --- a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/PathItem2.java +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/view/PathView.java @@ -12,7 +12,7 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.command.config.internal.openapi.model2; +package io.aklivity.zilla.runtime.command.config.internal.openapi.view; import java.util.LinkedHashMap; import java.util.Map; @@ -20,11 +20,11 @@ import io.aklivity.zilla.runtime.command.config.internal.openapi.model.Operation; import io.aklivity.zilla.runtime.command.config.internal.openapi.model.PathItem; -public class PathItem2 +public class PathView { private final LinkedHashMap methods; - public PathItem2( + public PathView( PathItem pathItem) { this.methods = new LinkedHashMap<>(); @@ -43,10 +43,10 @@ public Map methods() return methods; } - public static PathItem2 of( + public static PathView of( PathItem pathItem) { - return new PathItem2(pathItem); + return new PathView(pathItem); } private static void putIfNotNull( diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/Server2.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/view/ServerView.java similarity index 87% rename from incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/Server2.java rename to incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/view/ServerView.java index c37692435b..4d028a170e 100644 --- a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/model2/Server2.java +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/openapi/view/ServerView.java @@ -12,17 +12,17 @@ * WARRANTIES OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package io.aklivity.zilla.runtime.command.config.internal.openapi.model2; +package io.aklivity.zilla.runtime.command.config.internal.openapi.view; import java.net.URI; import io.aklivity.zilla.runtime.command.config.internal.openapi.model.Server; -public final class Server2 +public final class ServerView { private URI url; - private Server2( + private ServerView( Server server) { this.url = URI.create(server.url); @@ -33,9 +33,9 @@ public URI url() return url; } - public static Server2 of( + public static ServerView of( Server server) { - return new Server2(server); + return new ServerView(server); } } diff --git a/incubator/command-config/src/main/moditect/module-info.java b/incubator/command-config/src/main/moditect/module-info.java index 8885ef4672..014b16646a 100644 --- a/incubator/command-config/src/main/moditect/module-info.java +++ b/incubator/command-config/src/main/moditect/module-info.java @@ -27,6 +27,8 @@ opens io.aklivity.zilla.runtime.command.config.internal.openapi.model; + opens io.aklivity.zilla.runtime.command.config.internal.asyncapi.model; + provides io.aklivity.zilla.runtime.command.ZillaCommandSpi with io.aklivity.zilla.runtime.command.config.internal.ZillaConfigCommandSpi; } diff --git a/incubator/command-config/src/test/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/AsyncApiHttpProxyConfigGeneratorTest.java b/incubator/command-config/src/test/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/AsyncApiHttpProxyConfigGeneratorTest.java new file mode 100644 index 0000000000..8618b39a45 --- /dev/null +++ b/incubator/command-config/src/test/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/AsyncApiHttpProxyConfigGeneratorTest.java @@ -0,0 +1,95 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.http.proxy; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; + +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import org.junit.jupiter.api.Test; + +public class AsyncApiHttpProxyConfigGeneratorTest +{ + @Test + public void shouldGeneratePlainConfig() throws Exception + { + try (InputStream inputStream = getClass().getResourceAsStream("plain/asyncapi.yaml")) + { + // GIVEN + String expectedResult = Files.readString(Path.of(getClass().getResource("plain/zilla.yaml").getFile())); + AsyncApiHttpProxyConfigGenerator generator = new AsyncApiHttpProxyConfigGenerator(inputStream); + + // WHEN + String result = generator.generate(); + + // THEN + assertThat(result, equalTo(expectedResult)); + } + } + + @Test + public void shouldGenerateJwtConfig() throws Exception + { + try (InputStream inputStream = getClass().getResourceAsStream("jwt/asyncapi.yaml")) + { + // GIVEN + String expectedResult = Files.readString(Path.of(getClass().getResource("jwt/zilla.yaml").getFile())); + AsyncApiHttpProxyConfigGenerator generator = new AsyncApiHttpProxyConfigGenerator(inputStream); + + // WHEN + String result = generator.generate(); + + // THEN + assertThat(result, equalTo(expectedResult)); + } + } + + @Test + public void shouldGenerateTlsConfig() throws Exception + { + try (InputStream inputStream = getClass().getResourceAsStream("tls/asyncapi.yaml")) + { + // GIVEN + String expectedResult = 
Files.readString(Path.of(getClass().getResource("tls/zilla.yaml").getFile())); + AsyncApiHttpProxyConfigGenerator generator = new AsyncApiHttpProxyConfigGenerator(inputStream); + + // WHEN + String result = generator.generate(); + + // THEN + assertThat(result, equalTo(expectedResult)); + } + } + + @Test + public void shouldGenerateCompleteConfig() throws Exception + { + try (InputStream inputStream = getClass().getResourceAsStream("complete/asyncapi.yaml")) + { + // GIVEN + String expectedResult = Files.readString(Path.of(getClass().getResource("complete/zilla.yaml").getFile())); + AsyncApiHttpProxyConfigGenerator generator = new AsyncApiHttpProxyConfigGenerator(inputStream); + + // WHEN + String result = generator.generate(); + + // THEN + assertThat(result, equalTo(expectedResult)); + } + } +} diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/complete/asyncapi.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/complete/asyncapi.yaml new file mode 100644 index 0000000000..d2e61e7de5 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/complete/asyncapi.yaml @@ -0,0 +1,98 @@ +asyncapi: 3.0.0 +info: + title: HTTP Zilla Proxy + version: 1.0.0 + license: + name: Aklivity Community License +servers: + secure: + host: https://localhost:9090 + protocol: http + protocolVersion: '1.1' + security: + - httpBearerToken: + - public + plain: + host: http://localhost:8080 + protocol: http + protocolVersion: '1.1' +defaultContentType: application/json + +channels: + items: + address: /items + messages: + items: + $ref: '#/components/messages/item' + itemsbyid: + address: /items/{id} + parameters: + id: + description: Event ID. 
+ schema: + type: string + messages: + items: + $ref: '#/components/messages/item' + +operations: + postEvents: + action: send + bindings: + http: + type: request + method: POST + channel: + $ref: '#/channels/items' + getEvents: + action: receive + bindings: + http: + type: request + method: GET + query: + type: object + properties: + limit: + type: number + channel: + $ref: '#/channels/itemsbyid' + +components: + correlationIds: + itemsCorrelationId: + location: '$message.header#/idempotency-key' + messages: + item: + name: event + title: An event + correlationId: + $ref: "#/components/correlationIds/itemsCorrelationId" + headers: + type: object + properties: + idempotency-key: + description: Unique identifier for a given event + type: string + authorization: + description: Bearer {credentials} + type: string + contentType: application/json + payload: + type: object + properties: + item: + $ref: "#/components/schemas/item" + schemas: + item: + type: object + properties: + greeting: + type: string + required: + - greeting + securitySchemes: + httpBearerToken: + type: http + scheme: bearer + bearerFormat: jwt diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/complete/zilla.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/complete/zilla.yaml new file mode 100644 index 0000000000..acce74ba53 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/complete/zilla.yaml @@ -0,0 +1,129 @@ +name: example +bindings: + tcp_server0: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: + - 8080 + - 9090 + routes: + - exit: http_server0 + when: + - port: 8080 + - exit: tls_server0 + when: + - port: 9090 + tls_server0: + vault: server + type: tls + kind: server + options: + keys: + - "${{env.TLS_SERVER_KEY}}" + sni: + - "${{env.TLS_SERVER_SNI}}" + 
alpn: + - "${{env.TLS_SERVER_ALPN}}" + exit: http_server0 + http_server0: + type: http + kind: server + options: + access-control: + policy: cross-origin + authorization: + jwt0: + credentials: + headers: + authorization: "Bearer {credentials}" + routes: + - exit: http_client0 + when: + - headers: + :scheme: http + :authority: localhost:8080 + :path: /items/* + :method: GET + - exit: http_client0 + when: + - headers: + :scheme: http + :authority: localhost:8080 + :path: /items + :method: POST + - exit: http_client0 + when: + - headers: + :scheme: https + :authority: localhost:9090 + :path: /items/* + :method: GET + guarded: + jwt0: + - public + - exit: http_client0 + when: + - headers: + :scheme: https + :authority: localhost:9090 + :path: /items + :method: POST + guarded: + jwt0: + - public + http_client0: + type: http + kind: client + exit: tls_client0 + tls_client0: + vault: client + type: tls + kind: client + options: + trust: + - "${{env.TLS_CLIENT_TRUST}}" + trustcacerts: true + sni: + - "${{env.TLS_CLIENT_SNI}}" + alpn: + - "${{env.TLS_CLIENT_ALPN}}" + exit: tcp_client0 + tcp_client0: + type: tcp + kind: client + options: + host: "${{env.TCP_CLIENT_HOST}}" + port: ${{env.TCP_CLIENT_PORT}} +guards: + jwt0: + type: jwt + options: + issuer: "${{env.JWT_ISSUER}}" + audience: "${{env.JWT_AUDIENCE}}" + keys: + - kty: "${{env.JWT_KTY}}" + "n": "${{env.JWT_N}}" + e: "${{env.JWT_E}}" + alg: "${{env.JWT_ALG}}" + crv: "${{env.JWT_CRV}}" + x: "${{env.JWT_X}}" + "y": "${{env.JWT_Y}}" + use: "${{env.JWT_USE}}" + kid: "${{env.JWT_KID}}" +vaults: + client: + type: filesystem + options: + trust: + store: "${{env.TRUSTSTORE_PATH}}" + type: "${{env.TRUSTSTORE_TYPE}}" + password: "${{env.TRUSTSTORE_PASSWORD}}" + server: + type: filesystem + options: + keys: + store: "${{env.KEYSTORE_PATH}}" + type: "${{env.KEYSTORE_TYPE}}" + password: "${{env.KEYSTORE_PASSWORD}}" diff --git 
a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/jwt/asyncapi.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/jwt/asyncapi.yaml new file mode 100644 index 0000000000..5be0904148 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/jwt/asyncapi.yaml @@ -0,0 +1,94 @@ +asyncapi: 3.0.0 +info: + title: HTTP Zilla Proxy + version: 1.0.0 + license: + name: Aklivity Community License +servers: + plain: + host: http://localhost:8080 + protocol: http + protocolVersion: '1.1' + security: + - httpBearerToken: + - public +defaultContentType: application/json + +channels: + items: + address: /items + messages: + items: + $ref: '#/components/messages/item' + itemsbyid: + address: /items/{id} + parameters: + id: + description: Event ID. + schema: + type: string + messages: + items: + $ref: '#/components/messages/item' + +operations: + postEvents: + action: send + bindings: + http: + type: request + method: POST + channel: + $ref: '#/channels/items' + getEvents: + action: receive + bindings: + http: + type: request + method: GET + query: + type: object + properties: + limit: + type: number + channel: + $ref: '#/channels/itemsbyid' + +components: + correlationIds: + itemsCorrelationId: + location: '$message.header#/idempotency-key' + messages: + item: + name: event + title: An event + correlationId: + $ref: "#/components/correlationIds/itemsCorrelationId" + headers: + type: object + properties: + idempotency-key: + description: Unique identifier for a given event + type: string + authorization: + description: Bearer {credentials} + type: string + contentType: application/json + payload: + type: object + properties: + item: + $ref: "#/components/schemas/item" + schemas: + item: + type: object + properties: + greeting: + type: string + required: + - greeting + 
securitySchemes: + httpBearerToken: + type: http + scheme: bearer + bearerFormat: jwt diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/jwt/zilla.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/jwt/zilla.yaml new file mode 100644 index 0000000000..880a24f184 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/jwt/zilla.yaml @@ -0,0 +1,70 @@ +name: example +bindings: + tcp_server0: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: 8080 + routes: + - exit: http_server0 + when: + - port: 8080 + http_server0: + type: http + kind: server + options: + access-control: + policy: cross-origin + authorization: + jwt0: + credentials: + headers: + authorization: "Bearer {credentials}" + routes: + - exit: http_client0 + when: + - headers: + :scheme: http + :authority: localhost:8080 + :path: /items/* + :method: GET + guarded: + jwt0: + - public + - exit: http_client0 + when: + - headers: + :scheme: http + :authority: localhost:8080 + :path: /items + :method: POST + guarded: + jwt0: + - public + http_client0: + type: http + kind: client + exit: tcp_client0 + tcp_client0: + type: tcp + kind: client + options: + host: "${{env.TCP_CLIENT_HOST}}" + port: ${{env.TCP_CLIENT_PORT}} +guards: + jwt0: + type: jwt + options: + issuer: "${{env.JWT_ISSUER}}" + audience: "${{env.JWT_AUDIENCE}}" + keys: + - kty: "${{env.JWT_KTY}}" + "n": "${{env.JWT_N}}" + e: "${{env.JWT_E}}" + alg: "${{env.JWT_ALG}}" + crv: "${{env.JWT_CRV}}" + x: "${{env.JWT_X}}" + "y": "${{env.JWT_Y}}" + use: "${{env.JWT_USE}}" + kid: "${{env.JWT_KID}}" diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/plain/asyncapi.yaml 
b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/plain/asyncapi.yaml new file mode 100644 index 0000000000..e5b6e54ced --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/plain/asyncapi.yaml @@ -0,0 +1,83 @@ +asyncapi: 3.0.0 +info: + title: HTTP Zilla Proxy + version: 1.0.0 + license: + name: Aklivity Community License +servers: + plain: + host: http://localhost:8080 + protocol: http + protocolVersion: '1.1' +defaultContentType: application/json + +channels: + items: + address: /items + messages: + items: + $ref: '#/components/messages/item' + itemsbyid: + address: /items/{id} + parameters: + id: + description: Event ID. + schema: + type: string + messages: + items: + $ref: '#/components/messages/item' + +operations: + postEvents: + action: send + bindings: + http: + type: request + method: POST + channel: + $ref: '#/channels/items' + getEvents: + action: receive + bindings: + http: + type: request + method: GET + query: + type: object + properties: + limit: + type: number + channel: + $ref: '#/channels/itemsbyid' + +components: + correlationIds: + itemsCorrelationId: + location: '$message.header#/idempotency-key' + messages: + item: + name: event + title: An event + correlationId: + $ref: "#/components/correlationIds/itemsCorrelationId" + headers: + type: object + properties: + idempotency-key: + description: Unique identifier for a given event + type: string + contentType: application/json + payload: + type: object + properties: + item: + $ref: "#/components/schemas/item" + schemas: + item: + type: object + properties: + greeting: + type: string + required: + - greeting diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/plain/zilla.yaml 
b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/plain/zilla.yaml new file mode 100644 index 0000000000..d1ff8a09cd --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/plain/zilla.yaml @@ -0,0 +1,43 @@ +name: example +bindings: + tcp_server0: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: 8080 + routes: + - exit: http_server0 + when: + - port: 8080 + http_server0: + type: http + kind: server + options: + access-control: + policy: cross-origin + routes: + - exit: http_client0 + when: + - headers: + :scheme: http + :authority: localhost:8080 + :path: /items/* + :method: GET + - exit: http_client0 + when: + - headers: + :scheme: http + :authority: localhost:8080 + :path: /items + :method: POST + http_client0: + type: http + kind: client + exit: tcp_client0 + tcp_client0: + type: tcp + kind: client + options: + host: "${{env.TCP_CLIENT_HOST}}" + port: ${{env.TCP_CLIENT_PORT}} diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/tls/asyncapi.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/tls/asyncapi.yaml new file mode 100644 index 0000000000..eadce1aeb7 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/tls/asyncapi.yaml @@ -0,0 +1,83 @@ +asyncapi: 3.0.0 +info: + title: HTTP Zilla Proxy + version: 1.0.0 + license: + name: Aklivity Community License +servers: + secure: + host: https://localhost:9090 + protocol: http + protocolVersion: '1.1' +defaultContentType: application/json + +channels: + items: + address: /items + messages: + items: + $ref: '#/components/messages/item' + itemsbyid: + address: /items/{id} + parameters: + id: + description: Event ID. 
+ schema: + type: string + messages: + items: + $ref: '#/components/messages/item' + +operations: + postEvents: + action: send + bindings: + http: + type: request + method: POST + channel: + $ref: '#/channels/items' + getEvents: + action: receive + bindings: + http: + type: request + method: GET + query: + type: object + properties: + limit: + type: number + channel: + $ref: '#/channels/itemsbyid' + +components: + correlationIds: + itemsCorrelationId: + location: '$message.header#/idempotency-key' + messages: + item: + name: event + title: An event + correlationId: + $ref: "#/components/correlationIds/itemsCorrelationId" + headers: + type: object + properties: + idempotency-key: + description: Unique identifier for a given event + type: string + contentType: application/json + payload: + type: object + properties: + item: + $ref: "#/components/schemas/item" + schemas: + item: + type: object + properties: + greeting: + type: string + required: + - greeting diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/tls/zilla.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/tls/zilla.yaml new file mode 100644 index 0000000000..7b4138fbe0 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/http/proxy/tls/zilla.yaml @@ -0,0 +1,83 @@ +name: example +bindings: + tcp_server0: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: 9090 + routes: + - exit: tls_server0 + when: + - port: 9090 + tls_server0: + vault: server + type: tls + kind: server + options: + keys: + - "${{env.TLS_SERVER_KEY}}" + sni: + - "${{env.TLS_SERVER_SNI}}" + alpn: + - "${{env.TLS_SERVER_ALPN}}" + exit: http_server0 + http_server0: + type: http + kind: server + options: + access-control: + policy: cross-origin + routes: + - exit: http_client0 + when: + - headers: + :scheme: https + 
:authority: localhost:9090 + :path: /items/* + :method: GET + - exit: http_client0 + when: + - headers: + :scheme: https + :authority: localhost:9090 + :path: /items + :method: POST + http_client0: + type: http + kind: client + exit: tls_client0 + tls_client0: + vault: client + type: tls + kind: client + options: + trust: + - "${{env.TLS_CLIENT_TRUST}}" + trustcacerts: true + sni: + - "${{env.TLS_CLIENT_SNI}}" + alpn: + - "${{env.TLS_CLIENT_ALPN}}" + exit: tcp_client0 + tcp_client0: + type: tcp + kind: client + options: + host: "${{env.TCP_CLIENT_HOST}}" + port: ${{env.TCP_CLIENT_PORT}} +vaults: + client: + type: filesystem + options: + trust: + store: "${{env.TRUSTSTORE_PATH}}" + type: "${{env.TRUSTSTORE_TYPE}}" + password: "${{env.TRUSTSTORE_PASSWORD}}" + server: + type: filesystem + options: + keys: + store: "${{env.KEYSTORE_PATH}}" + type: "${{env.KEYSTORE_TYPE}}" + password: "${{env.KEYSTORE_PASSWORD}}" diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/complete/zilla.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/complete/zilla.yaml index 8d3a8b105e..7cb3ade4ad 100644 --- a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/complete/zilla.yaml +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/complete/zilla.yaml @@ -21,7 +21,7 @@ bindings: kind: server options: keys: - - "${{env.TLS_SERVER_KEYS}}" + - "${{env.TLS_SERVER_KEY}}" sni: - "${{env.TLS_SERVER_SNI}}" alpn: diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/tls/zilla.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/tls/zilla.yaml index 492f19f16c..22e4b1c9df 100644 --- 
a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/tls/zilla.yaml +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/openapi/http/proxy/tls/zilla.yaml @@ -16,7 +16,7 @@ bindings: kind: server options: keys: - - "${{env.TLS_SERVER_KEYS}}" + - "${{env.TLS_SERVER_KEY}}" sni: - "${{env.TLS_SERVER_SNI}}" alpn: From 080e33ab47d8e04bf96b34972d578581812f60ed Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 31 Aug 2023 16:13:01 -0700 Subject: [PATCH 073/115] Request data length is non-negative (#386) --- .../runtime/binding/http/internal/stream/HttpServerFactory.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java index 2357bfffd5..17fd99f7c4 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/stream/HttpServerFactory.java @@ -2732,7 +2732,7 @@ private int doRequestData( Flyweight extension) { int requestNoAck = (int)(requestSeq - requestAck); - int length = Math.min(requestMax - requestNoAck - requestPad, limit - offset); + int length = Math.min(Math.max(requestMax - requestNoAck - requestPad, 0), limit - offset); if (length > 0) { From c4229eac0b0ae32e0c4520fbee3c337a568ac1f4 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Mon, 4 Sep 2023 18:00:58 +0200 Subject: [PATCH 074/115] Mqtt kafka redirect (#381) --- .../streams/kafka/session.redirect/client.rpt | 182 ++++++++++++++++++ .../streams/kafka/session.redirect/server.rpt | 117 +++++++++++ .../streams/mqtt/session.redirect/client.rpt | 35 ++++ .../streams/mqtt/session.redirect/server.rpt | 36 ++++ 
.../stream/MqttKafkaSessionFactory.java | 60 ++++-- .../stream/MqttKafkaPublishProxyIT.java | 1 + .../stream/MqttKafkaSessionProxyIT.java | 14 ++ .../kafka/internal/KafkaFunctions.java | 22 +++ .../kafka/internal/KafkaFunctionsTest.java | 50 +++++ 9 files changed, 503 insertions(+), 14 deletions(-) create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt new file mode 100644 index 0000000000..fa6dac76f0 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt @@ -0,0 +1,182 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write close +read closed + +write notify INIT_MIGRATE_FINISHED + +connect await INIT_MIGRATE_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write notify GROUP_FINISHED + +connect await GROUP_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .consumerId("localhost:1883") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + 
.hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .build() + .build()} +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write notify SESSION_STATE_FINISHED + +connect await SESSION_STATE_FINISHED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("mqtt_messages") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() + .build() + .evaluation("EAGER") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .filters(1) + .partition(0, 1, 2) + .progress(0, 2) + .progress(1, 1) + .key("sensor/one") + .header("zilla:filter", "sensor") + .header("zilla:filter", "one") + .header("zilla:local", "client") + .header("zilla:format", "TEXT") + .build() + .build()} +read "message" + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt new file mode 100644 index 0000000000..0963cda94a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt @@ -0,0 +1,117 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the 
Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .consumerId("localhost:1883") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} +write flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .consumerId("localhost:1883") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + 
.build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + +write zilla:reset.ext ${kafka:resetEx() + .typeId(zilla:id("kafka")) + .consumerId("localhost:1884") + .build()} +read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt new file mode 100644 index 0000000000..7bcdb45192 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt @@ -0,0 +1,35 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(1) + .clientId("client-1") + .serverRef("localhost:1883") + .build() + .build()} + +connected + +read zilla:reset.ext ${mqtt:resetEx() + .typeId(zilla:id("mqtt")) + .serverRef("localhost:1884") + .build()} +write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt new file mode 100644 index 0000000000..daa2e283a9 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt @@ -0,0 +1,36 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client-1") + .serverRef("localhost:1883") + .build() + .build()} + +connected + +write zilla:reset.ext ${mqtt:resetEx() + .typeId(zilla:id("mqtt")) + .serverRef("localhost:1884") + .build()} +read abort diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java index d0f027f3d2..9daec953b9 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java @@ -76,8 +76,10 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaGroupDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaMergedDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaMergedFlushExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaResetExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttResetExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSessionBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSessionDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.ResetFW; @@ -105,6 +107,7 @@ public class 
MqttKafkaSessionFactory implements MqttKafkaStreamFactory private static final String16FW WILL_SIGNAL_NAME = new String16FW("will-signal"); private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0); private static final int DATA_FLAG_COMPLETE = 0x03; + public static final String MQTT_CLIENTS_GROUP_ID = "mqtt-clients"; private static final int SIGNAL_DELIVER_WILL_MESSAGE = 1; private static final int SIGNAL_CONNECT_WILL_STREAM = 2; private static final int SIZE_OF_UUID = 38; @@ -138,7 +141,9 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final MqttWillSignalFW mqttWillSignalRO = new MqttWillSignalFW(); private final MqttWillMessageFW mqttWillRO = new MqttWillMessageFW(); private final MqttDataExFW mqttDataExRO = new MqttDataExFW(); + private final MqttResetExFW.Builder mqttResetExRW = new MqttResetExFW.Builder(); private final KafkaDataExFW kafkaDataExRO = new KafkaDataExFW(); + private final KafkaResetExFW kafkaResetExRO = new KafkaResetExFW(); private final KafkaFlushExFW kafkaFlushExRO = new KafkaFlushExFW(); private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); @@ -298,6 +303,7 @@ private final class MqttSessionProxy private String16FW clientId; private String16FW clientIdMigrate; + private String serverRef; private int sessionExpiryMillis; private int sessionFlags; private int willPadding; @@ -390,6 +396,7 @@ private void onMqttBegin( final int sessionExpiry = mqttSessionBeginEx.expiry(); sessionExpiryMillis = mqttSessionBeginEx.expiry() == 0 ? 
Integer.MAX_VALUE : (int) SECONDS.toMillis(sessionExpiry); sessionFlags = mqttSessionBeginEx.flags(); + serverRef = mqttSessionBeginEx.serverRef().asString(); if (!isSetWillFlag(sessionFlags) || isSetCleanStart(sessionFlags)) { @@ -828,13 +835,14 @@ private void doMqttWindow( } private void doMqttReset( - long traceId) + long traceId, + Flyweight extension) { if (!MqttKafkaState.initialClosed(state)) { state = MqttKafkaState.closeInitial(state); - doReset(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId); + doReset(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, extension); } } } @@ -1201,7 +1209,7 @@ private void doKafkaReset( { state = MqttKafkaState.closeReply(state); - doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId); + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); } } @@ -1510,7 +1518,7 @@ private void doKafkaReset( { state = MqttKafkaState.closeReply(state); - doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId); + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); } } @@ -1894,7 +1902,7 @@ private void doKafkaReset( { state = MqttKafkaState.closeReply(state); - doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId); + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); } } @@ -2358,7 +2366,25 @@ private void onKafkaReset( assert delegate.initialAck <= delegate.initialSeq; - delegate.doMqttReset(traceId); + final OctetsFW extension = reset.extension(); + final ExtensionFW resetEx = extension.get(extensionRO::tryWrap); + final KafkaResetExFW kafkaResetEx = + resetEx != null && resetEx.typeId() == kafkaTypeId ? extension.get(kafkaResetExRO::tryWrap) : null; + + Flyweight mqttResetEx = EMPTY_OCTETS; + + final String16FW consumerId = kafkaResetEx != null ? 
kafkaResetEx.consumerId() : null; + + if (consumerId != null) + { + mqttResetEx = mqttResetExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(mqttTypeId) + .serverRef(consumerId) + .build(); + } + + delegate.doMqttReset(traceId, mqttResetEx); } private void doKafkaReset( @@ -2368,7 +2394,7 @@ private void doKafkaReset( { state = MqttKafkaState.closeReply(state); - doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId); + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); } } @@ -2409,7 +2435,7 @@ protected void doKafkaBegin(long traceId, long authorization, long affinity) kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization, affinity, delegate.sessionsTopic, null, delegate.clientIdMigrate, - delegate.sessionId, KafkaCapabilities.PRODUCE_AND_FETCH); + delegate.sessionId, delegate.serverRef, KafkaCapabilities.PRODUCE_AND_FETCH); } @Override @@ -2494,7 +2520,7 @@ protected void doKafkaBegin( KafkaCapabilities.PRODUCE_ONLY : KafkaCapabilities.PRODUCE_AND_FETCH; kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization, affinity, delegate.sessionsTopic, delegate.clientId, delegate.clientIdMigrate, - delegate.sessionId, capabilities); + delegate.sessionId, delegate.serverRef, capabilities); } @Override @@ -2653,7 +2679,7 @@ protected void doKafkaBegin( state = MqttKafkaState.openingInitial(state); kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, delegate.sessionsTopic, delegate.clientId); + traceId, authorization, affinity, delegate.sessionsTopic, delegate.clientId, delegate.serverRef); } } @@ -2950,7 +2976,7 @@ private void onKafkaReset( assert delegate.initialAck <= delegate.initialSeq; - delegate.doMqttReset(traceId); + 
delegate.doMqttReset(traceId, EMPTY_OCTETS); } private void doKafkaReset( @@ -2960,7 +2986,7 @@ private void doKafkaReset( { state = MqttKafkaState.closeReply(state); - doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId); + doReset(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, EMPTY_OCTETS); } } @@ -3175,6 +3201,7 @@ private MessageConsumer newKafkaStream( String16FW clientId, String16FW clientIdMigrate, String16FW sessionId, + String serverRef, KafkaCapabilities capabilities) { final KafkaBeginExFW kafkaBeginEx = @@ -3185,6 +3212,7 @@ private MessageConsumer newKafkaStream( m.capabilities(c -> c.set(capabilities)); m.topic(sessionsTopicName); m.groupId(MQTT_CLIENTS_GROUP_ID); + m.consumerId(serverRef); if (clientId != null) { m.partitionsItem(p -> @@ -3287,7 +3315,8 @@ private MessageConsumer newKafkaStream( long authorization, long affinity, String16FW topic, - String16FW clientId) + String16FW clientId, + String serverRef) { String16FW key = new String16FW(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX); final KafkaBeginExFW kafkaBeginEx = @@ -3297,6 +3326,7 @@ private MessageConsumer newKafkaStream( m.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)) .topic(topic) .groupId(MQTT_CLIENTS_GROUP_ID) + .consumerId(serverRef) .partitionsItem(p -> p.partitionId(KafkaOffsetType.HISTORICAL.value()) .partitionOffset(KafkaOffsetType.HISTORICAL.value())) @@ -3517,7 +3547,8 @@ private void doReset( long sequence, long acknowledge, int maximum, - long traceId) + long traceId, + Flyweight extension) { final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) .originId(originId) @@ -3527,6 +3558,7 @@ private void doReset( .acknowledge(acknowledge) .maximum(maximum) .traceId(traceId) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) .build(); sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); diff --git 
a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java index 4720b202df..3405632c2a 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java @@ -230,6 +230,7 @@ public void shouldSendMultipleClients() throws Exception @Test @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/publish.with.user.property/client", "${kafka}/publish.with.user.property/server"}) diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java index cebf00d7e1..b04b6c499c 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java @@ -198,6 +198,20 @@ public void shouldGroupStreamReceiveServerSentReset() throws Exception k3po.finish(); } + + @Test + @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") + @Configure(name = SESSION_ID_NAME, + value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") + @Specification({ + "${mqtt}/session.redirect/client", + "${kafka}/session.redirect/server"}) + public void shouldRedirect() throws Exception + { + k3po.finish(); + } + @Test 
@Configuration("proxy.yaml") @Configure(name = SESSION_ID_NAME, diff --git a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java index 427a07998e..e2b77ea581 100644 --- a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java +++ b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java @@ -2250,6 +2250,13 @@ public KafkaResetExBuilder error( return this; } + public KafkaResetExBuilder consumerId( + String consumerId) + { + resetExRW.consumerId(consumerId); + return this; + } + public byte[] build() { final KafkaResetExFW resetEx = resetExRW.build(); @@ -4035,6 +4042,7 @@ public final class KafkaMergedBeginExMatcherBuilder private KafkaCapabilities capabilities; private String16FW topic; private String16FW groupId; + private String16FW consumerId; private Array32FW.Builder partitionsRW; private KafkaIsolation isolation; private KafkaDeltaType deltaType; @@ -4068,6 +4076,13 @@ public KafkaMergedBeginExMatcherBuilder groupId( return this; } + public KafkaMergedBeginExMatcherBuilder consumerId( + String consumerId) + { + this.consumerId = new String16FW(consumerId); + return this; + } + public KafkaMergedBeginExMatcherBuilder partition( int partitionId, long offset) @@ -4169,6 +4184,7 @@ private boolean match( return matchCapabilities(mergedBeginEx) && matchTopic(mergedBeginEx) && matchGroupId(mergedBeginEx) && + matchConsumerId(mergedBeginEx) && matchPartitions(mergedBeginEx) && matchFilters(mergedBeginEx) && matchIsolation(mergedBeginEx) && @@ -4195,6 +4211,12 @@ private boolean matchGroupId( return groupId == null || groupId.equals(mergedBeginEx.groupId()); } + private boolean matchConsumerId( + final KafkaMergedBeginExFW mergedBeginEx) + { + return consumerId == null || 
consumerId.equals(mergedBeginEx.consumerId()); + } + private boolean matchPartitions( final KafkaMergedBeginExFW mergedBeginEx) { diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java index b65916d815..f7d4359c7c 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java @@ -2642,6 +2642,41 @@ public void shouldMatchMergedBeginExtensionTopic() throws Exception assertNotNull(matcher.match(byteBuf)); } + @Test + public void shouldMatchMergedBeginExtensionConsumerId() throws Exception + { + BytesMatcher matcher = KafkaFunctions.matchBeginEx() + .merged() + .topic("topic") + .consumerId("test") + .build() + .build(); + + ByteBuffer byteBuf = ByteBuffer.allocate(1024); + + new KafkaBeginExFW.Builder() + .wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) + .typeId(0x01) + .merged(f -> f + .topic("topic") + .consumerId("test") + .partitionsItem(p -> p.partitionId(0).partitionOffset(0L)) + .filtersItem(i -> i + .conditionsItem(c -> c + .key(k -> k + .length(3) + .value(v -> v.set("key".getBytes(UTF_8))))) + .conditionsItem(c -> c + .header(h -> h + .nameLen(4) + .name(n -> n.set("name".getBytes(UTF_8))) + .valueLen(5) + .value(v -> v.set("value".getBytes(UTF_8))))))) + .build(); + + assertNotNull(matcher.match(byteBuf)); + } + @Test public void shouldMatchMergedBeginExtensionPartitions() throws Exception { @@ -3300,6 +3335,21 @@ public void shouldGenerateProduceResetExtension() assertEquals(87, resetEx.error()); } + @Test + public void shouldGenerateResetExtensionWithConsumerId() + { + byte[] build = KafkaFunctions.resetEx() + .typeId(0x01) + .consumerId("consumer-1") + .build(); + + DirectBuffer buffer = new 
UnsafeBuffer(build); + KafkaResetExFW resetEx = new KafkaResetExFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals(0x01, resetEx.typeId()); + assertEquals("consumer-1", resetEx.consumerId().asString()); + } + @Test public void shouldMatchProduceDataExtensionTimestamp() throws Exception From e3c08f239b4c24a0a4221f241d85c7bd0f125e04 Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Mon, 4 Sep 2023 11:16:58 -0700 Subject: [PATCH 075/115] Merged consumer group support (#390) --- .../client.rpt | 5 +- .../server.rpt | 1 + .../client.rpt | 3 +- .../server.rpt | 1 + .../client.rpt | 3 +- .../server.rpt | 1 + .../client.rpt | 1 + .../server.rpt | 1 + .../stream/MqttKafkaSubscribeFactory.java | 46 +- .../command/log/internal/LoggableStream.java | 35 +- .../stream/KafkaCacheClientFactory.java | 7 + .../stream/KafkaCacheConsumerFactory.java | 1022 ++++++++++ .../stream/KafkaCacheGroupFactory.java | 59 +- .../stream/KafkaCacheOffsetFetchFactory.java | 1046 +++++++++++ .../stream/KafkaCacheServerFactory.java | 7 + .../stream/KafkaClientConsumerFactory.java | 1322 +++++++++++++ .../internal/stream/KafkaClientFactory.java | 8 + .../stream/KafkaClientGroupFactory.java | 1242 +++++++++++-- .../stream/KafkaClientOffsetFetchFactory.java | 1644 +++++++++++++++++ .../internal/stream/KafkaMergedFactory.java | 440 ++++- .../stream/KafkaOffsetFetchTopic.java | 32 + .../binding-kafka/src/main/zilla/protocol.idl | 42 +- .../internal/stream/CacheConsumerIT.java | 64 + .../kafka/internal/stream/CacheMergedIT.java | 10 + .../internal/stream/CacheOffsetFetchIT.java | 68 + .../internal/stream/ClientConsumerIT.java | 64 + .../kafka/internal/stream/ClientGroupIT.java | 3 +- .../internal/stream/ClientOffsetFetchIT.java | 62 + .../kafka/produce/bidi.stream.rpc/client.rpt | 1 + .../kafka/produce/bidi.stream.rpc/server.rpt | 1 + .../produce/client.stream.rpc/client.rpt | 1 + .../produce/client.stream.rpc/server.rpt | 1 + .../produce/server.stream.rpc/client.rpt | 1 + 
.../produce/server.stream.rpc/server.rpt | 1 + .../kafka/produce/unary.rpc/client.rpt | 1 + .../kafka/produce/unary.rpc/server.rpt | 1 + .../kafka/get.item.modified/client.rpt | 1 + .../kafka/get.item.modified/server.rpt | 1 + .../get.item.no.etag.modified/client.rpt | 1 + .../get.item.no.etag.modified/server.rpt | 1 + .../kafka/get.items.modified/client.rpt | 1 + .../kafka/get.items.modified/server.rpt | 1 + .../kafka/get.items.write.flush/client.rpt | 1 + .../kafka/get.items.write.flush/server.rpt | 1 + .../kafka/streams/kafka/get.items/client.rpt | 1 + .../kafka/streams/kafka/get.items/server.rpt | 1 + .../kafka/internal/KafkaFunctions.java | 942 ++++++---- .../main/resources/META-INF/zilla/kafka.idl | 94 +- .../consumer/partition.assignment/client.rpt | 40 + .../consumer/partition.assignment/server.rpt | 48 + .../client.rpt | 9 +- .../server.rpt | 8 +- .../application/group/leader/client.rpt | 9 +- .../application/group/leader/server.rpt | 8 +- .../group/partition.assignment/client.rpt | 64 + .../group/partition.assignment/server.rpt | 69 + .../client.rpt | 20 +- .../server.rpt | 17 +- .../rebalance.protocol.highlander/client.rpt | 21 +- .../rebalance.protocol.highlander/server.rpt | 18 +- .../rebalance.protocol.unknown/client.rpt | 10 +- .../rebalance.protocol.unknown/server.rpt | 8 +- .../group/rebalance.sync.group/client.rpt | 65 + .../group/rebalance.sync.group/server.rpt | 69 + .../merged.fetch.filter.change/client.rpt | 2 + .../merged.fetch.filter.change/server.rpt | 2 + .../client.rpt | 1 + .../server.rpt | 3 +- .../client.rpt | 1 + .../server.rpt | 1 + .../merged.fetch.filter.none/client.rpt | 1 + .../merged.fetch.filter.none/server.rpt | 1 + .../merged.fetch.filter.sync/client.rpt | 1 + .../merged.fetch.filter.sync/server.rpt | 1 + .../client.rpt | 1 + .../server.rpt | 1 + .../merged.fetch.message.values/client.rpt | 1 + .../merged.fetch.message.values/server.rpt | 1 + .../client.rpt | 45 + .../server.rpt | 51 + .../client.rpt | 4 + .../server.rpt | 16 
+- .../merged.produce.flush.dynamic/client.rpt | 4 + .../merged.produce.flush.dynamic/server.rpt | 4 + .../merged/merged.produce.flush/client.rpt | 6 +- .../merged/merged.produce.flush/server.rpt | 4 + .../client.rpt | 170 ++ .../server.rpt | 168 ++ .../offset.commit/commit.offset/client.rpt | 37 + .../offset.commit/commit.offset/server.rpt | 43 + .../offset.fetch/partition.offset/client.rpt | 36 + .../offset.fetch/partition.offset/server.rpt | 41 + .../coordinator.not.available/client.rpt | 46 +- .../coordinator.not.available/server.rpt | 36 + .../client.rpt | 93 +- .../server.rpt | 72 + .../client.rpt | 52 +- .../server.rpt | 42 +- .../client.rpt | 105 +- .../server.rpt | 86 +- .../client.rpt | 54 +- .../server.rpt | 42 +- .../rebalance.protocol.highlander/client.rpt | 52 +- .../rebalance.protocol.highlander/server.rpt | 42 +- .../rebalance.protocol.unknown/client.rpt | 52 +- .../rebalance.protocol.unknown/server.rpt | 42 +- .../rebalance.sync.group/client.rpt | 52 +- .../rebalance.sync.group/server.rpt | 42 +- .../leader/client.rpt | 81 +- .../leader/server.rpt | 72 +- .../topic.offset.info/client.rpt | 49 + .../topic.offset.info/server.rpt | 46 + .../kafka/internal/KafkaFunctionsTest.java | 348 ++-- .../kafka/streams/application/ConsumerIT.java | 47 + .../kafka/streams/application/GroupIT.java | 19 +- .../kafka/streams/application/MergedIT.java | 18 + .../server.sent.messages.with.etag/client.rpt | 1 + .../server.sent.messages.with.etag/server.rpt | 1 + .../client.rpt | 1 + .../server.rpt | 1 + .../kafka/server.sent.messages/client.rpt | 1 + .../kafka/server.sent.messages/server.rpt | 1 + 122 files changed, 10172 insertions(+), 847 deletions(-) create mode 100644 runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheConsumerFactory.java create mode 100644 runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetFetchFactory.java create mode 100644 
runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConsumerFactory.java create mode 100644 runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java create mode 100644 runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaOffsetFetchTopic.java create mode 100644 runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java create mode 100644 runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetFetchIT.java create mode 100644 runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientConsumerIT.java create mode 100644 runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientOffsetFetchIT.java create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt create mode 100644 
specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/server.rpt create mode 100644 specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ConsumerIT.java diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt index 4751574dcd..22159c89e1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt @@ -15,7 +15,7 @@ connect "zilla://streams/kafka0" option zilla:window 8192 - option zilla:transmission "duplex" + option zilla:transmission "duplex" write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) @@ -38,6 +38,7 @@ connected write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:topic") @@ -72,4 +73,4 @@ read zilla:data.ext ${kafka:matchDataEx() .build() .build()} -read "message" \ No newline at end of file +read "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt index a3bbe5e066..ed5f4be06c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt @@ -40,6 +40,7 @@ connected read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:topic") diff 
--git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt index 961aa5b7da..7bf151bef1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt @@ -15,7 +15,7 @@ connect "zilla://streams/kafka0" option zilla:window 8192 - option zilla:transmission "duplex" + option zilla:transmission "duplex" write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) @@ -38,6 +38,7 @@ connected write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:topic") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt index a09ec9ad48..5dd2bb4238 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt @@ -39,6 +39,7 @@ connected read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() 
.headers("zilla:topic") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt index 39be054da7..3f46cd9613 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt @@ -15,7 +15,7 @@ connect "zilla://streams/kafka0" option zilla:window 8192 - option zilla:transmission "duplex" + option zilla:transmission "duplex" write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) @@ -37,6 +37,7 @@ connected write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:topic") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt index b9347e2956..7cff3610a1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt @@ -39,6 +39,7 @@ connected read advised zilla:flush ${kafka:flushEx() 
.typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:topic") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt index d5c1573ee4..81fdfb6c02 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt @@ -43,6 +43,7 @@ connected write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:topic") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt index e9a955fe0b..9f9cf79e63 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt @@ -45,6 +45,7 @@ connected read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:topic") diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java 
b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java index 8e68a70bd1..d4acef38b8 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java @@ -301,30 +301,30 @@ private void onMqttFlush( final KafkaFlushExFW kafkaFlushEx = kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) .typeId(kafkaTypeId) - .merged(m -> - { - m.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); - filters.forEach(filter -> - - m.filtersItem(f -> - { - f.conditionsItem(ci -> - { - subscriptionIds.add((int) filter.subscriptionId()); - buildHeaders(ci, filter.pattern().asString()); - }); - boolean noLocal = (filter.flags() & NO_LOCAL_FLAG) != 0; - if (noLocal) + .merged(mf -> + mf.fetch(m -> + { + m.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); + filters.forEach(filter -> + m.filtersItem(f -> { - final DirectBuffer valueBuffer = new String16FW(clientId).value(); - f.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> - h.nameLen(helper.kafkaLocalHeaderName.sizeof()) - .name(helper.kafkaLocalHeaderName) - .valueLen(valueBuffer.capacity()) - .value(valueBuffer, 0, valueBuffer.capacity()))))); - } - })); - }) + f.conditionsItem(ci -> + { + subscriptionIds.add((int) filter.subscriptionId()); + buildHeaders(ci, filter.pattern().asString()); + }); + boolean noLocal = (filter.flags() & NO_LOCAL_FLAG) != 0; + if (noLocal) + { + final DirectBuffer valueBuffer = new String16FW(clientId).value(); + f.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> + h.nameLen(helper.kafkaLocalHeaderName.sizeof()) + .name(helper.kafkaLocalHeaderName) + .valueLen(valueBuffer.capacity()) + .value(valueBuffer, 0, valueBuffer.capacity()))))); + 
} + })); + })) .build(); delegate.doKafkaFlush(traceId, authorization, budgetId, reserved, kafkaFlushEx); diff --git a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java index c8a8689afb..f218df1826 100644 --- a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java +++ b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java @@ -88,7 +88,7 @@ import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaFetchFlushExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaGroupBeginExFW; -import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaGroupDataExFW; +import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaGroupFlushExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaMergedBeginExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaMergedDataExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaMergedFlushExFW; @@ -1080,9 +1080,6 @@ private void onKafkaDataEx( case KafkaDataExFW.KIND_DESCRIBE: onKafkaDescribeDataEx(offset, timestamp, kafkaDataEx.describe()); break; - case KafkaDataExFW.KIND_GROUP: - onKafkaGroupDataEx(offset, timestamp, kafkaDataEx.group()); - break; case KafkaDataExFW.KIND_FETCH: onKafkaFetchDataEx(offset, timestamp, kafkaDataEx.fetch()); break; @@ -1110,17 +1107,6 @@ private void onKafkaDescribeDataEx( format("%s: %s", c.name().asString(), c.value().asString()))); } - private void onKafkaGroupDataEx( - int offset, - long timestamp, - KafkaGroupDataExFW group) - { - String16FW leader = group.leaderId(); - String16FW member = group.memberId(); - - out.printf(verboseFormat, index, 
offset, timestamp, format("[group] %s %s", leader.asString(), member.asString())); - } - private void onKafkaFetchDataEx( int offset, long timestamp, @@ -1200,6 +1186,9 @@ private void onKafkaFlushEx( case KafkaFlushExFW.KIND_MERGED: onKafkaMergedFlushEx(offset, timestamp, kafkaFlushEx.merged()); break; + case KafkaFlushExFW.KIND_GROUP: + onKafkaGroupFlushEx(offset, timestamp, kafkaFlushEx.group()); + break; case KafkaFlushExFW.KIND_FETCH: onKafkaFetchFlushEx(offset, timestamp, kafkaFlushEx.fetch()); break; @@ -1211,8 +1200,8 @@ private void onKafkaMergedFlushEx( long timestamp, KafkaMergedFlushExFW merged) { - final ArrayFW progress = merged.progress(); - final Array32FW filters = merged.filters(); + final ArrayFW progress = merged.fetch().progress(); + final Array32FW filters = merged.fetch().filters(); out.printf(verboseFormat, index, offset, timestamp, "[merged]"); progress.forEach(p -> out.printf(verboseFormat, index, offset, timestamp, @@ -1224,6 +1213,18 @@ private void onKafkaMergedFlushEx( filters.forEach(f -> f.conditions().forEach(c -> out.printf(verboseFormat, index, offset, timestamp, asString(c)))); } + private void onKafkaGroupFlushEx( + int offset, + long timestamp, + KafkaGroupFlushExFW group) + { + String16FW leader = group.leaderId(); + String16FW member = group.memberId(); + + out.printf(verboseFormat, index, offset, timestamp, format("[group] %s %s (%d)", leader.asString(), + member.asString(), group.members().fieldCount())); + } + private void onKafkaFetchFlushEx( int offset, long timestamp, diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java index de57588c10..8c547bae7e 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java +++ 
b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java @@ -66,6 +66,11 @@ public KafkaCacheClientFactory( final KafkaCacheGroupFactory cacheGroupFactory = new KafkaCacheGroupFactory(config, context, bindings::get); + final KafkaCacheConsumerFactory consumerGroupFactory = new KafkaCacheConsumerFactory(config, context, bindings::get); + + final KafkaCacheOffsetFetchFactory cacheOffsetFetchFactory = + new KafkaCacheOffsetFetchFactory(config, context, bindings::get); + final KafkaCacheClientFetchFactory cacheFetchFactory = new KafkaCacheClientFetchFactory( config, context, bindings::get, accountant::supplyDebitor, supplyCache, supplyCacheRoute); @@ -79,6 +84,8 @@ public KafkaCacheClientFactory( factories.put(KafkaBeginExFW.KIND_META, cacheMetaFactory); factories.put(KafkaBeginExFW.KIND_DESCRIBE, cacheDescribeFactory); factories.put(KafkaBeginExFW.KIND_GROUP, cacheGroupFactory); + factories.put(KafkaBeginExFW.KIND_CONSUMER, consumerGroupFactory); + factories.put(KafkaBeginExFW.KIND_OFFSET_FETCH, cacheOffsetFetchFactory); factories.put(KafkaBeginExFW.KIND_FETCH, cacheFetchFactory); factories.put(KafkaBeginExFW.KIND_PRODUCE, cacheProduceFactory); factories.put(KafkaBeginExFW.KIND_MERGED, cacheMergedFactory); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheConsumerFactory.java new file mode 100644 index 0000000000..6396dfe1af --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheConsumerFactory.java @@ -0,0 +1,1022 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.LongSupplier; +import java.util.function.LongUnaryOperator; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.collections.IntHashSet; +import org.agrona.collections.Object2ObjectHashMap; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Array32FW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerAssignmentFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerDataExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaTopicPartitionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; +import io.aklivity.zilla.runtime.engine.concurrent.Signaler; + +public final class KafkaCacheConsumerFactory implements BindingHandler +{ + private static final Consumer EMPTY_EXTENSION = ex -> {}; + + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final AbortFW abortRO = new AbortFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new WindowFW(); + private final ExtensionFW extensionRO = new ExtensionFW(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + private final KafkaDataExFW kafkaDataExRO = new KafkaDataExFW(); + private final KafkaResetExFW kafkaResetExRO = new KafkaResetExFW(); + + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final ResetFW.Builder 
resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); + private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); + + private final int kafkaTypeId; + private final MutableDirectBuffer writeBuffer; + private final MutableDirectBuffer extBuffer; + private final BufferPool bufferPool; + private final Signaler signaler; + private final BindingHandler streamFactory; + private final LongUnaryOperator supplyInitialId; + private final LongUnaryOperator supplyReplyId; + private final LongSupplier supplyTraceId; + private final LongFunction supplyNamespace; + private final LongFunction supplyLocalName; + private final LongFunction supplyBinding; + + private final Object2ObjectHashMap clientConsumerFansByGroupId; + + public KafkaCacheConsumerFactory( + KafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding) + { + this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.bufferPool = context.bufferPool(); + this.signaler = context.signaler(); + this.streamFactory = context.streamFactory(); + this.supplyInitialId = context::supplyInitialId; + this.supplyReplyId = context::supplyReplyId; + this.supplyTraceId = context::supplyTraceId; + this.supplyNamespace = context::supplyNamespace; + this.supplyLocalName = context::supplyLocalName; + this.supplyBinding = supplyBinding; + this.clientConsumerFansByGroupId = new Object2ObjectHashMap<>(); + } + + @Override + public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer sender) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = 
begin.routedId(); + final long initialId = begin.streamId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert (initialId & 0x0000_0000_0000_0001L) != 0L; + + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extension.get(extensionRO::tryWrap); + assert beginEx != null && beginEx.typeId() == kafkaTypeId; + final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::wrap); + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_CONSUMER; + final KafkaConsumerBeginExFW kafkaConsumerBeginEx = kafkaBeginEx.consumer(); + final String groupId = kafkaConsumerBeginEx.groupId().asString(); + final String topic = kafkaConsumerBeginEx.topic().asString(); + final String consumerId = kafkaConsumerBeginEx.consumerId().asString(); + final int timeout = kafkaConsumerBeginEx.timeout(); + final IntHashSet partitions = new IntHashSet(); + kafkaConsumerBeginEx.partitionIds().forEach(p -> partitions.add(p.partitionId())); + + MessageConsumer newStream = null; + + final KafkaBindingConfig binding = supplyBinding.apply(routedId); + final KafkaRouteConfig resolved = binding != null ? 
binding.resolve(authorization, topic, groupId) : null; + + if (resolved != null) + { + final long resolvedId = resolved.id; + + KafkaCacheConsumerFanout fanout = clientConsumerFansByGroupId.get(groupId); + + if (fanout == null) + { + KafkaCacheConsumerFanout newFanout = + new KafkaCacheConsumerFanout(routedId, resolvedId, authorization, groupId, + topic, consumerId, partitions, timeout); + fanout = newFanout; + clientConsumerFansByGroupId.put(groupId, fanout); + } + + newStream = new KafkaCacheConsumerStream( + fanout, + sender, + originId, + routedId, + initialId, + affinity, + authorization + )::onConsumerMessage; + } + + return newStream; + } + + private MessageConsumer newStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + final MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + 
.extension(extension) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doDataNull( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.limit()) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + 
receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doWindow( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + { + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + final class KafkaCacheConsumerFanout + { + private final long originId; + private final long routedId; + private final long authorization; + private final String groupId; + private final String topic; + private final String consumerId; + private final int timeout; + private final List members; + private final IntHashSet partitions; + private final IntHashSet assignedPartitions; + private final Object2ObjectHashMap assignments; + + private long initialId; + private long replyId; + private MessageConsumer receiver; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + + + 
private KafkaCacheConsumerFanout( + long originId, + long routedId, + long authorization, + String groupId, + String topic, + String consumerId, + IntHashSet partitions, + int timeout) + { + this.originId = originId; + this.routedId = routedId; + this.authorization = authorization; + this.groupId = groupId; + this.topic = topic; + this.consumerId = consumerId; + this.partitions = partitions; + this.timeout = timeout; + this.members = new ArrayList<>(); + this.assignedPartitions = new IntHashSet(); + this.assignments = new Object2ObjectHashMap<>(); + } + + private void onConsumerFanoutMemberOpening( + long traceId, + KafkaCacheConsumerStream member) + { + members.add(member); + + assert !members.isEmpty(); + + doConsumerFanoutInitialBeginIfNecessary(traceId); + + if (KafkaState.initialOpened(state)) + { + member.doConsumerInitialWindow(traceId, 0L, 0, 0, 0); + } + + if (KafkaState.replyOpened(state)) + { + member.doConsumerReplyBeginIfNecessary(traceId); + } + } + + private void onConsumerFanoutMemberOpened( + long traceId, + KafkaCacheConsumerStream member) + { + if (!assignedPartitions.isEmpty()) + { + final KafkaDataExFW kafkaDataEx = + kafkaDataExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .consumer(m -> m + .partitions(p -> + assignedPartitions.forEach(ap -> p.item(np -> np.partitionId(ap)))) + .assignments(a -> + assignments.forEach((k, v) -> a.item(na -> na.consumerId(k) + .partitions(pa -> + v.forEach(pi -> pa.item(pai -> pai.partitionId(pi)))))))) + .build(); + member.doConsumerReplyDataIfNecessary(traceId, kafkaDataEx); + } + } + + private void onConsumerFanoutMemberClosed( + long traceId, + KafkaCacheConsumerStream member) + { + members.remove(member); + + if (members.isEmpty()) + { + doConsumerFanoutInitialEndIfNecessary(traceId); + doConsumerFanoutReplyResetIfNecessary(traceId); + } + } + + private void doConsumerFanoutInitialBeginIfNecessary( + long traceId) + { + if (KafkaState.closed(state)) + { + state = 0; + } + + if 
(!KafkaState.initialOpening(state)) + { + doConsumerFanoutInitialBegin(traceId); + } + } + + private void doConsumerFanoutInitialBegin( + long traceId) + { + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.receiver = newStream(this::onConsumerFanoutMessage, + originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, 0L, + ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .consumer(m -> m.groupId(groupId) + .consumerId(consumerId) + .timeout(timeout) + .topic(topic) + .partitionIds(p -> partitions.forEach(tp -> p.item(np -> np.partitionId(tp.intValue()))))) + .build() + .sizeof())); + state = KafkaState.openingInitial(state); + } + + private void doConsumerFanoutInitialEndIfNecessary( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doConsumerFanoutInitialEnd(traceId); + } + } + + private void doConsumerFanoutInitialEnd( + long traceId) + { + doEnd(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + + private void doConsumerFanoutInitialAbortIfNecessary( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doConsumerFanoutInitialAbort(traceId); + } + } + + private void doConsumerFanoutInitialAbort( + long traceId) + { + doAbort(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + + private void onConsumerFanoutInitialReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + final OctetsFW extension = reset.extension(); + + final KafkaResetExFW kafkaResetEx = extension.get(kafkaResetExRO::tryWrap); + final int error = kafkaResetEx != null ? 
kafkaResetEx.error() : -1; + + state = KafkaState.closedInitial(state); + + doConsumerFanoutReplyResetIfNecessary(traceId); + + members.forEach(s -> s.doConsumerInitialResetIfNecessary(traceId)); + } + + private void onConsumerFanoutInitialWindow( + WindowFW window) + { + if (!KafkaState.initialOpened(state)) + { + + final long traceId = window.traceId(); + + state = KafkaState.openedInitial(state); + + members.forEach(s -> s.doConsumerInitialWindow(traceId, 0L, 0, 0, 0)); + } + } + + private void onConsumerFanoutMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onConsumerFanoutReplyBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onConsumerFanoutReplyData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onConsumerFanoutReplyEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onConsumerFanoutReplyAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onConsumerFanoutInitialReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onConsumerFanoutInitialWindow(window); + break; + default: + break; + } + } + + private void onConsumerFanoutReplyBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + state = KafkaState.openingReply(state); + + members.forEach(s -> s.doConsumerReplyBeginIfNecessary(traceId)); + + doConsumerFanoutReplyWindow(traceId, 0, bufferPool.slotCapacity()); + } + + private void onConsumerFanoutReplyData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final int reserved = 
data.reserved(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final KafkaDataExFW kafkaDataEx = dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; + assert kafkaDataEx == null || kafkaDataEx.kind() == KafkaBeginExFW.KIND_CONSUMER; + final KafkaConsumerDataExFW kafkaConsumerDataEx = kafkaDataEx != null ? kafkaDataEx.consumer() : null; + + if (kafkaConsumerDataEx != null) + { + final Array32FW newPartitions = kafkaConsumerDataEx.partitions(); + final Array32FW newAssignments = kafkaConsumerDataEx.assignments(); + + assignedPartitions.clear(); + newPartitions.forEach(p -> this.assignedPartitions.add(p.partitionId())); + + assignments.clear(); + newAssignments.forEach(a -> + { + IntHashSet partitions = new IntHashSet(); + a.partitions().forEach(p -> partitions.add(p.partitionId())); + assignments.put(a.consumerId().asString(), partitions); + }); + + members.forEach(s -> s.doConsumerReplyDataIfNecessary(traceId, kafkaDataEx)); + } + + doConsumerFanoutReplyWindow(traceId, 0, replyMax); + } + + private void onConsumerFanoutReplyEnd( + EndFW end) + { + final long traceId = end.traceId(); + + state = KafkaState.closedReply(state); + + doConsumerFanoutInitialEndIfNecessary(traceId); + + members.forEach(s -> s.doConsumerReplyEndIfNecessary(traceId)); + } + + private void onConsumerFanoutReplyAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedReply(state); + + doConsumerFanoutInitialAbortIfNecessary(traceId); + + members.forEach(s -> s.doConsumerReplyAbortIfNecessary(traceId)); + } + + private void doConsumerFanoutReplyResetIfNecessary( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doConsumerFanoutReplyReset(traceId); + } + } + + private 
void doConsumerFanoutReplyReset( + long traceId) + { + doReset(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization); + + state = KafkaState.closedReply(state); + } + + private void doConsumerFanoutReplyWindow( + long traceId, + int minReplyNoAck, + int minReplyMax) + { + final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck); + + if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state)) + { + replyAck = newReplyAck; + assert replyAck <= replySeq; + + replyMax = minReplyMax; + + state = KafkaState.openedReply(state); + + doWindow(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, 0L, 0); + } + } + } + + private final class KafkaCacheConsumerStream + { + private final KafkaCacheConsumerFanout group; + private final MessageConsumer sender; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final long affinity; + private final long authorization; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private long replyBudgetId; + + KafkaCacheConsumerStream( + KafkaCacheConsumerFanout group, + MessageConsumer sender, + long originId, + long routedId, + long initialId, + long affinity, + long authorization) + { + this.group = group; + this.sender = sender; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + this.authorization = authorization; + } + + private void onConsumerMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + 
onConsumerInitialBegin(begin); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onConsumerInitialEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onConsumerInitialAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onConsumerReplyWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onConsumerReplyReset(reset); + break; + default: + break; + } + } + + private void onConsumerInitialBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + state = KafkaState.openingInitial(state); + + group.onConsumerFanoutMemberOpening(traceId, this); + } + + private void onConsumerInitialEnd( + EndFW end) + { + final long traceId = end.traceId(); + + state = KafkaState.closedInitial(state); + + group.onConsumerFanoutMemberClosed(traceId, this); + + doConsumerReplyEndIfNecessary(traceId); + } + + private void onConsumerInitialAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedInitial(state); + + group.onConsumerFanoutMemberClosed(traceId, this); + + doConsumerReplyAbortIfNecessary(traceId); + } + + private void doConsumerInitialResetIfNecessary( + long traceId) + { + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + { + doConsumerInitialReset(traceId); + } + + state = KafkaState.closedInitial(state); + } + + private void doConsumerInitialReset( + long traceId) + { + state = KafkaState.closedInitial(state); + + doReset(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization); + } + + private void doConsumerInitialWindow( + long traceId, + long budgetId, + int minInitialNoAck, + int minInitialPad, + int minInitialMax) + { + final long newInitialAck = Math.max(initialSeq - minInitialNoAck, initialAck); + + if 
(newInitialAck > initialAck || minInitialMax > initialMax || !KafkaState.initialOpened(state)) + { + initialAck = newInitialAck; + assert initialAck <= initialSeq; + + initialMax = minInitialMax; + + state = KafkaState.openedInitial(state); + + doWindow(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, minInitialPad); + } + } + + private void doConsumerReplyBeginIfNecessary( + long traceId) + { + if (!KafkaState.replyOpening(state)) + { + doConsumerReplyBegin(traceId); + } + } + + private void doConsumerReplyBegin( + long traceId) + { + state = KafkaState.openingReply(state); + + doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, EMPTY_EXTENSION); + } + + private void doConsumerReplyDataIfNecessary( + long traceId, + Flyweight extension) + { + if (KafkaState.replyOpened(state)) + { + doConsumerReplyData(traceId, extension); + } + } + + private void doConsumerReplyData( + long traceId, + Flyweight extension) + { + final int reserved = replyPad; + + doDataNull(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, reserved, extension); + + replySeq += reserved; + } + + private void doConsumerReplyEndIfNecessary( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doConsumerReplyEnd(traceId); + } + + state = KafkaState.closedReply(state); + } + + private void doConsumerReplyEnd( + long traceId) + { + state = KafkaState.closedReply(state); + doEnd(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + private void doConsumerReplyAbortIfNecessary( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doConsumerReplyAbort(traceId); + } + + state = KafkaState.closedReply(state); + } + + private void doConsumerReplyAbort( + long traceId) + { + state = 
KafkaState.closedReply(state); + doAbort(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + private void onConsumerReplyReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + group.onConsumerFanoutMemberClosed(traceId, this); + + doConsumerInitialResetIfNecessary(traceId); + } + + private void onConsumerReplyWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + this.replyAck = acknowledge; + this.replyMax = maximum; + this.replyPad = padding; + this.replyBudgetId = budgetId; + + assert replyAck <= replySeq; + + if (!KafkaState.replyOpened(state)) + { + state = KafkaState.openedReply(state); + + final long traceId = window.traceId(); + group.onConsumerFanoutMemberOpened(traceId, this); + } + } + } +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java index e4e4ceb6a9..63ba697551 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java @@ -153,7 +153,7 @@ private MessageConsumer newStream( long traceId, long authorization, long affinity, - Consumer extension) + OctetsFW extension) { final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) .originId(originId) @@ -313,7 +313,7 @@ private void doFlush( 
long authorization, long budgetId, int reserved, - Consumer extension) + OctetsFW extension) { final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) .originId(originId) @@ -477,7 +477,8 @@ private KafkaCacheGroupNet( } private void doGroupInitialBegin( - long traceId) + long traceId, + OctetsFW extension) { if (KafkaState.closed(state)) { @@ -497,14 +498,7 @@ private void doGroupInitialBegin( this.replyId = supplyReplyId.applyAsLong(initialId); this.receiver = newStream(this::onGroupMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, 0L, - ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l) - .typeId(kafkaTypeId) - .group(g -> g.groupId(delegate.groupId) - .protocol(delegate.protocol) - .timeout(delegate.timeout)) - .build() - .sizeof())); + traceId, authorization, 0L, extension); state = KafkaState.openingInitial(state); } } @@ -527,10 +521,11 @@ private void doGroupInitialData( } private void doGroupInitialFlush( - long traceId) + long traceId, + OctetsFW extension) { doFlush(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, initialBud, 0, EMPTY_EXTENSION); + traceId, authorization, initialBud, 0, extension); } private void doGroupInitialEnd( @@ -620,6 +615,10 @@ private void onGroupMessage( final DataFW data = dataRO.wrap(buffer, index, index + length); onGroupReplyData(data); break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onGroupReplyFlush(flush); + break; case EndFW.TYPE_ID: final EndFW end = endRO.wrap(buffer, index, index + length); onGroupReplyEnd(end); @@ -673,6 +672,26 @@ private void onGroupReplyData( delegate.doGroupReplyData(traceId, flags, reserved, payload, extension); } + private void onGroupReplyFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final int 
reserved = flush.reserved(); + final OctetsFW extension = flush.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + delegate.doGroupReplyFlush(traceId, extension); + } + private void onGroupReplyEnd( EndFW end) { @@ -838,6 +857,7 @@ private void onGroupInitialBegin( final long traceId = begin.traceId(); final long authorization = begin.authorization(); final long affinity = begin.affinity(); + final OctetsFW extension = begin.extension(); assert acknowledge <= sequence; assert sequence >= initialSeq; @@ -849,7 +869,7 @@ private void onGroupInitialBegin( assert initialAck <= initialSeq; - group.doGroupInitialBegin(traceId); + group.doGroupInitialBegin(traceId, extension); } private void onGroupInitialData( @@ -899,6 +919,7 @@ private void onGroupInitialFlush( final long sequence = flush.sequence(); final long acknowledge = flush.acknowledge(); final long traceId = flush.traceId(); + final OctetsFW extension = flush.extension(); assert acknowledge <= sequence; assert sequence >= initialSeq; @@ -908,7 +929,7 @@ private void onGroupInitialFlush( assert initialAck <= initialSeq; - group.doGroupInitialFlush(traceId); + group.doGroupInitialFlush(traceId, extension); } private void onGroupInitialAbort( @@ -980,6 +1001,14 @@ private void doGroupReplyData( replySeq += reserved; } + private void doGroupReplyFlush( + long traceId, + OctetsFW extension) + { + doFlush(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, 0, extension); + } + private void doGroupReplyEnd( long traceId) { diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetFetchFactory.java new file mode 100644 index 
0000000000..625dd927dd --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetFetchFactory.java @@ -0,0 +1,1046 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.LongUnaryOperator; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; 
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.FlushFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaOffsetFetchBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; + + +public final class KafkaCacheOffsetFetchFactory implements BindingHandler +{ + private static final Consumer EMPTY_EXTENSION = ex -> {}; + + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final FlushFW flushRO = new FlushFW(); + private final AbortFW abortRO = new AbortFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new WindowFW(); + private final ExtensionFW extensionRO = new ExtensionFW(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final FlushFW.Builder flushRW = new FlushFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); + + private final int kafkaTypeId; + private final MutableDirectBuffer writeBuffer; + private final BufferPool bufferPool; + private final BindingHandler streamFactory; + 
private final LongUnaryOperator supplyInitialId; + private final LongUnaryOperator supplyReplyId; + private final LongFunction supplyBinding; + + public KafkaCacheOffsetFetchFactory( + KafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding) + { + this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.bufferPool = context.bufferPool(); + this.streamFactory = context.streamFactory(); + this.supplyInitialId = context::supplyInitialId; + this.supplyReplyId = context::supplyReplyId; + this.supplyBinding = supplyBinding; + } + + @Override + public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer sender) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert (initialId & 0x0000_0000_0000_0001L) != 0L; + + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extension.get(extensionRO::tryWrap); + assert beginEx != null && beginEx.typeId() == kafkaTypeId; + final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap); + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_OFFSET_FETCH; + final KafkaOffsetFetchBeginExFW kafkaOffsetFetchBeginEx = kafkaBeginEx.offsetFetch(); + final String groupId = kafkaOffsetFetchBeginEx.groupId().asString(); + + MessageConsumer newStream = null; + + final KafkaBindingConfig binding = supplyBinding.apply(routedId); + final KafkaRouteConfig resolved = binding != null ? 
binding.resolve(authorization, null, groupId) : null; + + if (resolved != null) + { + final long resolvedId = resolved.id; + + newStream = new KafkaCacheOffsetFetchApp( + sender, + originId, + routedId, + initialId, + affinity, + authorization, + resolvedId)::onOffsetFetchMessage; + } + + return newStream; + } + + private MessageConsumer newStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + OctetsFW extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + final MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + 
Flyweight extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int flags, + int reserved, + OctetsFW payload, + Flyweight extension) + { + final DataFW frame = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); + } + + + private void doDataNull( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(data.typeId(), data.buffer(), 
data.offset(), data.sizeof()); + } + + private void doFlush( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Consumer extension) + { + final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension) + .build(); + + receiver.accept(flush.typeId(), flush.buffer(), flush.offset(), flush.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doWindow( + MessageConsumer sender, + long originId, 
+ long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + { + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + final class KafkaCacheOffsetFetchNet + { + private final long originId; + private final long routedId; + private final long authorization; + private final KafkaCacheOffsetFetchApp delegate; + + private long initialId; + private long replyId; + private MessageConsumer receiver; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + private long initialBud; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaCacheOffsetFetchNet( + KafkaCacheOffsetFetchApp delegate, + long originId, + long routedId, + long authorization) + { + this.delegate = delegate; + this.originId = originId; + this.routedId = routedId; + this.receiver = MessageConsumer.NOOP; + this.authorization = authorization; + } + + private void doOffsetFetchInitialBegin( + long 
traceId, + long affinity, + OctetsFW extension) + { + if (KafkaState.closed(state)) + { + state = 0; + } + + if (!KafkaState.initialOpening(state)) + { + + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.receiver = newStream(this::onOffsetFetchMessage, + originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, this.authorization, affinity, extension); + state = KafkaState.openingInitial(state); + } + } + + private void doOffsetFetchInitialData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + doData(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doOffsetFetchInitialFlush( + long traceId) + { + doFlush(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, initialBud, 0, EMPTY_EXTENSION); + } + + private void doOffsetFetchInitialEnd( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doEnd(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void doOffsetFetchInitialAbort( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doAbort(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void onOffsetFetchInitialReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= 
delegate.initialAck; + + delegate.initialAck = acknowledge; + state = KafkaState.closedInitial(state); + + assert delegate.initialAck <= delegate.initialSeq; + + delegate.doOffsetFetchInitialReset(traceId); + + doOffsetFetchReplyReset(traceId); + } + + + private void onOffsetFetchInitialWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + assert maximum >= delegate.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + initialBud = budgetId; + state = KafkaState.openedInitial(state); + + assert initialAck <= initialSeq; + + delegate.doOffsetFetchInitialWindow(authorization, traceId, budgetId, padding); + } + + private void onOffsetFetchMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onOffsetFetchReplyBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onOffsetFetchReplyData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onOffsetFetchReplyEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onOffsetFetchReplyAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onOffsetFetchInitialReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onOffsetFetchInitialWindow(window); + break; + 
default: + break; + } + } + + private void onOffsetFetchReplyBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + state = KafkaState.openingReply(state); + + delegate.doOffsetFetchReplyBegin(traceId, begin.extension()); + } + + private void onOffsetFetchReplyData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final int flags = data.flags(); + final int reserved = data.reserved(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + delegate.doOffsetFetchReplyData(traceId, flags, reserved, payload, extension); + } + + private void onOffsetFetchReplyEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + delegate.doOffsetFetchReplyEnd(traceId); + } + + private void onOffsetFetchReplyAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + delegate.doOffsetFetchReplyAbort(traceId); + } + + private void doOffsetFetchReplyReset( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization); + + state = KafkaState.closedReply(state); + } + } + + private void doOffsetFetchReplyWindow( + long traceId, + long 
authorization, + long budgetId, + int padding) + { + replyAck = Math.max(delegate.replyAck - replyPad, 0); + replyMax = delegate.replyMax; + + doWindow(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding + replyPad); + } + } + + private final class KafkaCacheOffsetFetchApp + { + private final KafkaCacheOffsetFetchNet offsetFetch; + private final MessageConsumer sender; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final long affinity; + private final long authorization; + + private int state; + + private long replyBudgetId; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + private long replyBud; + private int replyCap; + + KafkaCacheOffsetFetchApp( + MessageConsumer sender, + long originId, + long routedId, + long initialId, + long affinity, + long authorization, + long resolvedId) + { + this.offsetFetch = new KafkaCacheOffsetFetchNet(this, routedId, resolvedId, authorization); + this.sender = sender; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + this.authorization = authorization; + } + + private void onOffsetFetchMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onOffsetFetchInitialBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onOffsetFetchInitialData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onOffsetFetchInitialEnd(end); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = 
flushRO.wrap(buffer, index, index + length); + onOffsetFetchInitialFlush(flush); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onOffsetFetchInitialAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onOffsetFetchReplyWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onOffsetFetchReplyReset(reset); + break; + default: + break; + } + } + + private void onOffsetFetchInitialBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + final OctetsFW extension = begin.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + initialAck = acknowledge; + state = KafkaState.openingInitial(state); + + assert initialAck <= initialSeq; + + offsetFetch.doOffsetFetchInitialBegin(traceId, affinity, extension); + } + + private void onOffsetFetchInitialData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + final int flags = data.flags(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + + assert initialAck <= initialSeq; + + offsetFetch.doOffsetFetchInitialData(traceId, authorization, budgetId, reserved, flags, payload, extension); + } + + private void onOffsetFetchInitialEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long 
acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + offsetFetch.doOffsetFetchInitialEnd(traceId); + } + + private void onOffsetFetchInitialFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + offsetFetch.doOffsetFetchInitialFlush(traceId); + } + + private void onOffsetFetchInitialAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + offsetFetch.doOffsetFetchInitialAbort(traceId); + } + + private void doOffsetFetchInitialReset( + long traceId) + { + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + { + state = KafkaState.closedInitial(state); + + doReset(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization); + } + + state = KafkaState.closedInitial(state); + } + + private void doOffsetFetchInitialWindow( + long authorization, + long traceId, + long budgetId, + int padding) + { + initialAck = offsetFetch.initialAck; + initialMax = offsetFetch.initialMax; + + doWindow(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, padding); + } + + private void doOffsetFetchReplyBegin( + long traceId, + OctetsFW extension) + { + state = KafkaState.openingReply(state); + + doBegin(sender, originId, 
routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, extension); + } + + private void doOffsetFetchReplyData( + long traceId, + int flag, + int reserved, + OctetsFW payload, + Flyweight extension) + { + + doData(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, flag, reserved, payload, extension); + + replySeq += reserved; + } + + private void doOffsetFetchReplyEnd( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doEnd(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void doOffsetFetchReplyAbort( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doAbort(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void onOffsetFetchReplyReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final int maximum = reset.maximum(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + cleanup(traceId); + } + + private void onOffsetFetchReplyWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert 
acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + replyBud = budgetId; + replyPad = padding; + replyCap = capabilities; + state = KafkaState.openedReply(state); + + assert replyAck <= replySeq; + + offsetFetch.doOffsetFetchReplyWindow(traceId, acknowledge, budgetId, padding); + } + + private void cleanup( + long traceId) + { + doOffsetFetchInitialReset(traceId); + doOffsetFetchReplyAbort(traceId); + + offsetFetch.doOffsetFetchInitialAbort(traceId); + offsetFetch.doOffsetFetchReplyReset(traceId); + } + } +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java index 1d6ea15226..72c431cf91 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java @@ -69,6 +69,11 @@ public KafkaCacheServerFactory( final KafkaCacheGroupFactory cacheGroupFactory = new KafkaCacheGroupFactory(config, context, bindings::get); + final KafkaCacheConsumerFactory consumerGroupFactory = new KafkaCacheConsumerFactory(config, context, bindings::get); + + final KafkaCacheOffsetFetchFactory cacheOffsetFetchFactory = + new KafkaCacheOffsetFetchFactory(config, context, bindings::get); + final KafkaCacheServerFetchFactory cacheFetchFactory = new KafkaCacheServerFetchFactory( config, context, bindings::get, supplyCache, supplyCacheRoute); @@ -79,6 +84,8 @@ public KafkaCacheServerFactory( factories.put(KafkaBeginExFW.KIND_META, cacheMetaFactory); factories.put(KafkaBeginExFW.KIND_DESCRIBE, cacheDescribeFactory); factories.put(KafkaBeginExFW.KIND_GROUP, cacheGroupFactory); + factories.put(KafkaBeginExFW.KIND_CONSUMER, consumerGroupFactory); + 
factories.put(KafkaBeginExFW.KIND_OFFSET_FETCH, cacheOffsetFetchFactory); factories.put(KafkaBeginExFW.KIND_FETCH, cacheFetchFactory); factories.put(KafkaBeginExFW.KIND_PRODUCE, cacheProduceFactory); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConsumerFactory.java new file mode 100644 index 0000000000..7e81172693 --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConsumerFactory.java @@ -0,0 +1,1322 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.LongUnaryOperator; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.collections.IntHashSet; +import org.agrona.collections.Object2ObjectHashMap; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Array32FW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.MemberAssignmentFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.TopicAssignmentFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.FlushFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; +import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFlushExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupFlushExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupMemberMetadataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupTopicMetadataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; + +public final class KafkaClientConsumerFactory implements BindingHandler +{ + private static final Consumer EMPTY_EXTENSION = ex -> {}; + private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer(); + private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0); + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final FlushFW flushRO = new FlushFW(); + private final AbortFW abortRO = new AbortFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new WindowFW(); + private final ExtensionFW extensionRO = new ExtensionFW(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + private final KafkaFlushExFW kafkaFlushExRO = new KafkaFlushExFW(); + private final KafkaGroupMemberMetadataFW kafkaGroupMemberMetadataRO = new KafkaGroupMemberMetadataFW(); + private final Array32FW groupTopicsMetadataRO = + new Array32FW<>(new KafkaGroupTopicMetadataFW()); + private final Array32FW topicAssignmentsRO = + new Array32FW<>(new TopicAssignmentFW()); + + private final Array32FW.Builder memberAssignmentRW = + new 
Array32FW.Builder<>(new MemberAssignmentFW.Builder(), new MemberAssignmentFW()); + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final FlushFW.Builder flushRW = new FlushFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); + private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); + private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder(); + private final KafkaGroupMemberMetadataFW.Builder kafkaGroupMemberMetadataRW = new KafkaGroupMemberMetadataFW.Builder(); + + private final int kafkaTypeId; + private final MutableDirectBuffer writeBuffer; + private final MutableDirectBuffer extBuffer; + private final BufferPool bufferPool; + private final BindingHandler streamFactory; + private final LongUnaryOperator supplyInitialId; + private final LongUnaryOperator supplyReplyId; + private final LongFunction supplyBinding; + private final Object2ObjectHashMap clientConsumerFansByGroupId; + + public KafkaClientConsumerFactory( + KafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding) + { + this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.bufferPool = context.bufferPool(); + this.streamFactory = context.streamFactory(); + this.supplyInitialId = context::supplyInitialId; + this.supplyReplyId = context::supplyReplyId; + this.supplyBinding = supplyBinding; + this.clientConsumerFansByGroupId = new Object2ObjectHashMap<>(); + } + + @Override + 
public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer sender) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert (initialId & 0x0000_0000_0000_0001L) != 0L; + + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extension.get(extensionRO::tryWrap); + assert beginEx != null && beginEx.typeId() == kafkaTypeId; + final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap); + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_CONSUMER; + final KafkaConsumerBeginExFW kafkaConsumerBeginEx = kafkaBeginEx.consumer(); + final String groupId = kafkaConsumerBeginEx.groupId().asString(); + final String topic = kafkaConsumerBeginEx.topic().asString(); + final String consumerId = kafkaConsumerBeginEx.consumerId().asString(); + final int timeout = kafkaConsumerBeginEx.timeout(); + final List partitions = new ArrayList<>(); + kafkaConsumerBeginEx.partitionIds().forEach(p -> partitions.add(p.partitionId())); + + MessageConsumer newStream = null; + + final KafkaBindingConfig binding = supplyBinding.apply(routedId); + final KafkaRouteConfig resolved = binding != null ? 
binding.resolve(authorization, topic, groupId) : null; + + if (resolved != null) + { + final long resolvedId = resolved.id; + + KafkaClientConsumerFanout fanout = clientConsumerFansByGroupId.get(groupId); + + if (fanout == null) + { + KafkaClientConsumerFanout newFanout = + new KafkaClientConsumerFanout(routedId, resolvedId, authorization, consumerId, groupId, timeout); + fanout = newFanout; + clientConsumerFansByGroupId.put(groupId, fanout); + } + + newStream = new KafkaClientConsumerStream( + fanout, + sender, + originId, + routedId, + initialId, + affinity, + authorization, + topic, + partitions)::onConsumerMessage; + } + + return newStream; + } + + private MessageConsumer newStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + final MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) 
+ .extension(extension) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Flyweight extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int flags, + int reserved, + DirectBuffer buffer, + int offset, + int limit, + Flyweight extension) + { + final DataFW frame = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(buffer, offset, limit) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int flags, + int reserved, + OctetsFW payload, + Consumer extension) + { + final DataFW frame = 
dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload) + .extension(extension) + .build(); + + receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); + } + + private void doDataNull( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doFlush( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Consumer extension) + { + final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension) + .build(); + + receiver.accept(flush.typeId(), flush.buffer(), flush.offset(), flush.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long 
acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doWindow( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + { + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + 
.originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + final class KafkaClientConsumerFanout + { + private final String consumerId; + private final String groupId; + private final long originId; + private final long routedId; + private final long authorization; + private final int timeout; + private final List streams; + private final Object2ObjectHashMap members; + private final Object2ObjectHashMap partitionsByTopic; + private final Object2ObjectHashMap> assignment; + + private long initialId; + private long replyId; + private MessageConsumer receiver; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + private long initialBud; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + private String leaderId; + private String memberId; + + + private KafkaClientConsumerFanout( + long originId, + long routedId, + long authorization, + String consumerId, + String groupId, + int timeout) + { + this.originId = originId; + this.routedId = routedId; + this.authorization = authorization; + this.consumerId = consumerId; + this.groupId = groupId; + this.timeout = timeout; + this.streams = new ArrayList<>(); + this.members = new Object2ObjectHashMap<>(); + this.partitionsByTopic = new Object2ObjectHashMap<>(); + this.assignment = new Object2ObjectHashMap<>(); + } + + private void doConsumerInitialBegin( + long traceId) + { + if (KafkaState.closed(state)) + { + state = 0; + } + + if (!KafkaState.initialOpening(state)) + { + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + + KafkaGroupMemberMetadataFW metadata = kafkaGroupMemberMetadataRW + .wrap(extBuffer, 0, extBuffer.capacity()) + 
.consumerId(consumerId) + .topics(t -> streams.forEach(s -> t.item(tp -> tp + .topic(s.topic) + .partitions(p -> s.partitions.forEach(sp -> + p.item(gtp -> gtp.partitionId(sp))))))) + .build(); + + this.receiver = newStream(this::onConsumerMessage, + originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, 0L, + ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .group(g -> + g.groupId(groupId) + .protocol("highlander") + .timeout(timeout) + .metadataLen(metadata.sizeof()) + .metadata(metadata.buffer(), 0, metadata.sizeof())) + .build().sizeof())); + state = KafkaState.openingInitial(state); + } + } + + private void doConsumerInitialData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + DirectBuffer buffer, + int offset, + int limit, + Flyweight extension) + { + doData(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, buffer, offset, limit, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doConsumerInitialFlush( + long traceId, + Consumer extension) + { + doFlush(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, initialBud, 0, extension); + } + + private void doConsumerInitialEnd( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doEnd(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void doConsumerInitialAbort( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doAbort(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void onConsumerInitialReset( + ResetFW reset) + { + final 
long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= this.initialAck; + + this.initialAck = acknowledge; + state = KafkaState.closedInitial(state); + + assert this.initialAck <= this.initialSeq; + + streams.forEach(m -> m.doConsumerInitialReset(traceId)); + + doConsumerReplyReset(traceId); + } + + + private void onConsumerInitialWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert acknowledge >= this.initialAck; + assert maximum >= this.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + initialBud = budgetId; + state = KafkaState.openedInitial(state); + + assert initialAck <= initialSeq; + + streams.forEach(m -> m.doConsumerInitialWindow(authorization, traceId, budgetId, padding)); + } + + private void onConsumerMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onConsumerReplyBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onConsumerReplyData(data); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onConsumerReplyFlush(flush); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onConsumerReplyEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onConsumerReplyAbort(abort); + 
break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onConsumerInitialReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onConsumerInitialWindow(window); + break; + default: + break; + } + } + + private void onConsumerReplyBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + state = KafkaState.openingReply(state); + + streams.forEach(m -> m.doConsumerReplyBegin(traceId, begin.extension())); + } + + private void onConsumerReplyFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorizationId = flush.authorization(); + final int reserved = flush.reserved(); + final OctetsFW extension = flush.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + replyAck = replySeq; + + assert replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + final KafkaFlushExFW flushEx = extension.get(kafkaFlushExRO::tryWrap); + + if (flushEx != null) + { + KafkaGroupFlushExFW kafkaGroupFlushEx = flushEx.group(); + + leaderId = kafkaGroupFlushEx.leaderId().asString(); + memberId = kafkaGroupFlushEx.memberId().asString(); + + partitionsByTopic.clear(); + members.clear(); + + kafkaGroupFlushEx.members().forEach(m -> + { + final OctetsFW metadata = m.metadata(); + final KafkaGroupMemberMetadataFW groupMetadata = kafkaGroupMemberMetadataRO + .wrap(metadata.buffer(), metadata.offset(), metadata.limit()); + final String consumerId = kafkaGroupMemberMetadataRO.consumerId().asString(); + + groupMetadata.topics().forEach(mt -> + { + final String mId = m.id().asString(); + members.put(mId, consumerId); + + final String topic = mt.topic().asString(); + IntHashSet partitions = partitionsByTopic.computeIfAbsent(topic, s -> new IntHashSet()); + mt.partitions().forEach(p -> 
partitions.add(p.partitionId())); + }); + + }); + } + + doPartitionAssignment(traceId, authorization); + } + + private void onConsumerReplyData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorizationId = data.authorization(); + final int flags = data.flags(); + final int reserved = data.reserved(); + final OctetsFW payload = data.payload(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + Array32FW topicAssignments = topicAssignmentsRO + .wrap(payload.buffer(), payload.offset(), payload.limit()); + + topicAssignments.forEach(ta -> + { + KafkaClientConsumerStream stream = + streams.stream().filter(s -> s.topic.equals(ta.topic().asString())).findFirst().get(); + + stream.doConsumerReplyData(traceId, flags, replyPad, EMPTY_OCTETS, + ex -> ex.set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .consumer(c -> c.partitions(p -> ta + .partitions() + .forEach(np -> p.item(tp -> tp.partitionId(np.partitionId())))) + .assignments(a -> ta.userdata().forEach(u -> + a.item(ua -> ua.consumerId(u.consumerId()).partitions(p -> u.partitions() + .forEach(np -> p.item(tp -> tp.partitionId(np.partitionId())))))))) + .build() + .sizeof())); + }); + } + + private void onConsumerReplyEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + streams.forEach(s -> s.doConsumerReplyEnd(traceId)); + doConsumerInitialEnd(traceId); + } + + private void onConsumerReplyAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = 
abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + streams.forEach(s -> s.cleanup(traceId)); + + doConsumerInitialAbort(traceId); + } + + private void doConsumerReplyReset( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization); + + state = KafkaState.closedReply(state); + } + } + + private void doConsumerReplyWindow( + long traceId, + long authorization, + long budgetId, + int padding) + { + replyAck = Math.max(replyAck - replyPad, 0); + + doWindow(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding + replyPad); + } + + private void doPartitionAssignment( + long traceId, + long authorization) + { + if (memberId.equals(leaderId)) + { + int memberSize = members.size(); + partitionsByTopic.forEach((t, p) -> + { + final int partitionSize = p.size(); + final int numberOfPartitionsPerMember = partitionSize / memberSize; + final int extraPartition = partitionSize % memberSize; + + int partitionIndex = 0; + int newPartitionPerTopic = numberOfPartitionsPerMember + extraPartition; + + for (String member : members.keySet()) + { + String consumerId = members.get(member); + List topicPartitions = assignment.computeIfAbsent( + member, tp -> new ArrayList<>()); + List partitions = new ArrayList<>(); + + for (; partitionIndex < newPartitionPerTopic; partitionIndex++) + { + partitions.add(p.iterator().next()); + } + topicPartitions.add(new TopicPartition(consumerId, t, partitions)); + + newPartitionPerTopic += numberOfPartitionsPerMember; + } + }); + } + + doMemberAssigment(traceId, authorization); + } + + private void doMemberAssigment( + long traceId, + long authorization) + { + if (!assignment.isEmpty()) + { + Array32FW assignment 
= memberAssignmentRW + .wrap(writeBuffer, DataFW.FIELD_OFFSET_PAYLOAD, writeBuffer.capacity()) + .item(ma -> this.assignment.forEach((k, v) -> + ma.memberId(k) + .assignments(ta -> v.forEach(tp -> ta.item(i -> + i.topic(tp.topic) + .partitions(p -> tp.partitions.forEach(t -> p.item(tpa -> tpa.partitionId(t)))) + .userdata(u -> + this.assignment.forEach((ak, av) -> + av.stream().filter(atp -> atp.topic.equals(tp.topic)).forEach(at -> + u.item(ud -> ud + .consumerId(at.consumerId) + .partitions(pt -> at.partitions.forEach(up -> + pt.item(pi -> pi.partitionId(up)))))))) + ))))) + .build(); + + doConsumerInitialData(traceId, authorization, initialBud, memberAssignmentRW.sizeof(), 3, + assignment.buffer(), assignment.offset(), assignment.sizeof(), EMPTY_OCTETS); + } + else + { + doConsumerInitialData(traceId, authorization, initialBud, memberAssignmentRW.sizeof(), 3, + EMPTY_OCTETS.buffer(), EMPTY_OCTETS.offset(), EMPTY_OCTETS.sizeof(), EMPTY_OCTETS); + } + } + } + + final class KafkaClientConsumerStream + { + private final KafkaClientConsumerFanout fanout; + private final MessageConsumer sender; + private final String topic; + private final List partitions; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final long affinity; + private final long authorization; + + private int state; + + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + private long replyBud; + private int replyCap; + + KafkaClientConsumerStream( + KafkaClientConsumerFanout fanout, + MessageConsumer sender, + long originId, + long routedId, + long initialId, + long affinity, + long authorization, + String topic, + List partitions) + { + this.fanout = fanout; + this.sender = sender; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = 
supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + this.authorization = authorization; + this.topic = topic; + this.partitions = partitions; + } + + private void onConsumerMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onConsumerInitialBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onConsumerInitialData(data); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onConsumerInitialFlush(flush); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onConsumerInitialEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onConsumerInitialAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onConsumerReplyWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onConsumerReplyReset(reset); + break; + default: + break; + } + } + + private void onConsumerInitialBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + initialAck = acknowledge; + state = KafkaState.openingInitial(state); + + assert initialAck <= initialSeq; + + fanout.streams.add(this); + + fanout.doConsumerInitialBegin(traceId); + } + + private void onConsumerInitialData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = 
data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + final int flags = data.flags(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + + assert initialAck <= initialSeq; + } + + private void onConsumerInitialEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + } + + private void onConsumerInitialFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + } + + private void onConsumerInitialAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + doConsumerReplyAbort(traceId); + fanout.streams.remove(this); + } + + private void doConsumerInitialReset( + long traceId) + { + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + { + state = KafkaState.closedInitial(state); + + doReset(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization); + } + + state = KafkaState.closedInitial(state); + } + + private void 
doConsumerInitialWindow( + long authorization, + long traceId, + long budgetId, + int padding) + { + doWindow(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, padding); + } + + private void doConsumerReplyBegin( + long traceId, + OctetsFW extension) + { + state = KafkaState.openingReply(state); + + doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, extension); + } + + private void doConsumerReplyData( + long traceId, + int flag, + int reserved, + OctetsFW payload, + Consumer extension) + { + doData(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBud, flag, reserved, payload, extension); + + replySeq += reserved; + } + + private void doConsumerReplyEnd( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doEnd(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void doConsumerReplyAbort( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doAbort(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void onConsumerReplyReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final int maximum = reset.maximum(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + cleanup(traceId); + } + + private void onConsumerReplyWindow( + WindowFW window) + { + final long 
sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long authorizationId = window.authorization(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + replyBud = budgetId; + replyPad = padding; + replyCap = capabilities; + state = KafkaState.openedReply(state); + + assert replyAck <= replySeq; + + fanout.replyMax = replyMax; + fanout.doConsumerReplyWindow(traceId, authorizationId, budgetId, padding); + } + + private void cleanup( + long traceId) + { + doConsumerInitialReset(traceId); + doConsumerReplyAbort(traceId); + } + } + + final class TopicPartition + { + private final String consumerId; + private final String topic; + private final List partitions; + + TopicPartition( + String consumerId, + String topic, + List partitions) + { + this.consumerId = consumerId; + this.topic = topic; + this.partitions = partitions; + } + } +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java index 10adeb7c4a..289ebff849 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java @@ -61,12 +61,18 @@ public KafkaClientFactory( final KafkaClientGroupFactory clientGroupFactory = new KafkaClientGroupFactory( config, context, bindings::get, accountant::supplyDebitor); + final KafkaClientConsumerFactory clientConsumerFactory = new 
KafkaClientConsumerFactory( + config, context, bindings::get); + final KafkaClientFetchFactory clientFetchFactory = new KafkaClientFetchFactory( config, context, bindings::get, accountant::supplyDebitor, supplyClientRoute); final KafkaClientProduceFactory clientProduceFactory = new KafkaClientProduceFactory( config, context, bindings::get, supplyClientRoute); + final KafkaClientOffsetFetchFactory clientOffsetFetchFactory = new KafkaClientOffsetFetchFactory( + config, context, bindings::get, supplyClientRoute); + final KafkaMergedFactory clientMergedFactory = new KafkaMergedFactory( config, context, bindings::get, accountant.creditor()); @@ -74,8 +80,10 @@ public KafkaClientFactory( factories.put(KafkaBeginExFW.KIND_META, clientMetaFactory); factories.put(KafkaBeginExFW.KIND_DESCRIBE, clientDescribeFactory); factories.put(KafkaBeginExFW.KIND_GROUP, clientGroupFactory); + factories.put(KafkaBeginExFW.KIND_CONSUMER, clientConsumerFactory); factories.put(KafkaBeginExFW.KIND_FETCH, clientFetchFactory); factories.put(KafkaBeginExFW.KIND_PRODUCE, clientProduceFactory); + factories.put(KafkaBeginExFW.KIND_OFFSET_FETCH, clientOffsetFetchFactory); factories.put(KafkaBeginExFW.KIND_MERGED, clientMergedFactory); this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index aba4326b47..8cfd2ae67c 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -19,9 +19,12 @@ import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT; import static 
io.aklivity.zilla.runtime.engine.concurrent.Signaler.NO_CANCEL_ID; import static java.lang.System.currentTimeMillis; +import static java.nio.charset.StandardCharsets.UTF_8; +import java.nio.ByteOrder; import java.time.Duration; import java.util.ArrayList; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.function.Consumer; @@ -32,6 +35,7 @@ import org.agrona.MutableDirectBuffer; import org.agrona.collections.Long2ObjectHashMap; import org.agrona.collections.LongLongConsumer; +import org.agrona.collections.MutableInteger; import org.agrona.collections.Object2ObjectHashMap; import org.agrona.concurrent.UnsafeBuffer; @@ -40,11 +44,15 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.RequestHeaderFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.ResponseHeaderFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.ConfigResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.DescribeConfigsRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.DescribeConfigsResponseFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.ResourceRequestFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.ResourceResponseFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.AssignmentFW; @@ -61,6 +69,8 @@ import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.ProtocolMetadataFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.SyncGroupRequestFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.SyncGroupResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.MemberAssignmentFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.TopicAssignmentFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; @@ -69,7 +79,9 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.FlushFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupMemberFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ProxyBeginExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; @@ -86,12 +98,16 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp { private static final short ERROR_EXISTS = -1; private static final short ERROR_NONE = 0; + private static final short ERROR_COORDINATOR_NOT_AVAILABLE = 15; private static final short ERROR_NOT_COORDINATOR_FOR_CONSUMER = 16; private static final short ERROR_UNKNOWN_MEMBER = 25; private static final short ERROR_MEMBER_ID_REQUIRED = 79; private static final short ERROR_REBALANCE_IN_PROGRESS = 27; private static final short 
SIGNAL_NEXT_REQUEST = 1; + private static final short DESCRIBE_CONFIGS_API_KEY = 32; + private static final short DESCRIBE_CONFIGS_API_VERSION = 0; + private static final byte RESOURCE_TYPE_BROKER = 1; private static final short FIND_COORDINATOR_API_KEY = 10; private static final short FIND_COORDINATOR_API_VERSION = 1; private static final short JOIN_GROUP_API_KEY = 11; @@ -105,6 +121,8 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private static final String UNKNOWN_MEMBER_ID = ""; private static final String HIGHLANDER_PROTOCOL = "highlander"; + private static final String GROUP_MIN_SESSION_TIMEOUT = "group.min.session.timeout.ms"; + private static final String GROUP_MAX_SESSION_TIMEOUT = "group.max.session.timeout.ms"; private static final byte GROUP_KEY_TYPE = 0x00; private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer(); private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0); @@ -123,16 +141,21 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final BeginFW.Builder beginRW = new BeginFW.Builder(); private final DataFW.Builder dataRW = new DataFW.Builder(); + private final FlushFW.Builder flushRW = new FlushFW.Builder(); private final EndFW.Builder endRW = new EndFW.Builder(); private final AbortFW.Builder abortRW = new AbortFW.Builder(); private final ResetFW.Builder resetRW = new ResetFW.Builder(); private final WindowFW.Builder windowRW = new WindowFW.Builder(); private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); + private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder(); private final KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder(); private final ProxyBeginExFW.Builder proxyBeginExRW = new ProxyBeginExFW.Builder(); private final RequestHeaderFW.Builder requestHeaderRW = new 
RequestHeaderFW.Builder(); + private final DescribeConfigsRequestFW.Builder describeConfigsRequestRW = new DescribeConfigsRequestFW.Builder(); + private final ResourceRequestFW.Builder resourceRequestRW = new ResourceRequestFW.Builder(); + private final String16FW.Builder configNameRW = new String16FW.Builder(ByteOrder.BIG_ENDIAN); private final FindCoordinatorRequestFW.Builder findCoordinatorRequestRW = new FindCoordinatorRequestFW.Builder(); private final JoinGroupRequestFW.Builder joinGroupRequestRW = new JoinGroupRequestFW.Builder(); private final ProtocolMetadataFW.Builder protocolMetadataRW = new ProtocolMetadataFW.Builder(); @@ -141,9 +164,11 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final HeartbeatRequestFW.Builder heartbeatRequestRW = new HeartbeatRequestFW.Builder(); private final LeaveGroupRequestFW.Builder leaveGroupRequestRW = new LeaveGroupRequestFW.Builder(); private final LeaveMemberFW.Builder leaveMemberRW = new LeaveMemberFW.Builder(); - private final ResourceRequestFW.Builder resourceRequestRW = new ResourceRequestFW.Builder(); + private final ResourceResponseFW resourceResponseRO = new ResourceResponseFW(); + private final ConfigResponseFW configResponseRO = new ConfigResponseFW(); private final ResponseHeaderFW responseHeaderRO = new ResponseHeaderFW(); + private final DescribeConfigsResponseFW describeConfigsResponseRO = new DescribeConfigsResponseFW(); private final FindCoordinatorResponseFW findCoordinatorResponseRO = new FindCoordinatorResponseFW(); private final JoinGroupResponseFW joinGroupResponseRO = new JoinGroupResponseFW(); private final MemberMetadataFW memberMetadataRO = new MemberMetadataFW(); @@ -151,8 +176,18 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final HeartbeatResponseFW heartbeatResponseRO = new HeartbeatResponseFW(); private final LeaveGroupResponseFW leaveGroupResponseRO = new LeaveGroupResponseFW(); private final 
LeaveMemberFW leaveMemberRO = new LeaveMemberFW(); - private final ResourceResponseFW resourceResponseRO = new ResourceResponseFW(); - + private final Array32FW memberAssignmentRO = + new Array32FW<>(new MemberAssignmentFW()); + + private final KafkaDescribeClientDecoder decodeSaslHandshakeResponse = this::decodeSaslHandshakeResponse; + private final KafkaDescribeClientDecoder decodeSaslHandshake = this::decodeSaslHandshake; + private final KafkaDescribeClientDecoder decodeSaslHandshakeMechanisms = this::decodeSaslHandshakeMechanisms; + private final KafkaDescribeClientDecoder decodeSaslHandshakeMechanism = this::decodeSaslHandshakeMechanism; + private final KafkaDescribeClientDecoder decodeSaslAuthenticateResponse = this::decodeSaslAuthenticateResponse; + private final KafkaDescribeClientDecoder decodeSaslAuthenticate = this::decodeSaslAuthenticate; + private final KafkaDescribeClientDecoder decodeDescribeResponse = this::decodeDescribeResponse; + private final KafkaDescribeClientDecoder decodeIgnoreAll = this::decodeIgnoreAll; + private final KafkaDescribeClientDecoder decodeReject = this::decodeReject; private final KafkaGroupClusterClientDecoder decodeClusterSaslHandshakeResponse = this::decodeSaslHandshakeResponse; private final KafkaGroupClusterClientDecoder decodeClusterSaslHandshake = this::decodeSaslHandshake; private final KafkaGroupClusterClientDecoder decodeClusterSaslHandshakeMechanisms = this::decodeSaslHandshakeMechanisms; @@ -185,6 +220,7 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorIgnoreAll = this::decodeIgnoreAll; private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorReject = this::decodeCoordinatorReject; + private final Map configs = new LinkedHashMap<>(); private final int kafkaTypeId; private final int proxyTypeId; private final MutableDirectBuffer writeBuffer; @@ -436,7 +472,7 @@ private void doData( 
receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); } - private void doDataNull( + private void doDataEmpty( MessageConsumer receiver, long originId, long routedId, @@ -461,12 +497,44 @@ private void doDataNull( .authorization(authorization) .budgetId(budgetId) .reserved(reserved) + .payload(EMPTY_OCTETS) .extension(extension) .build(); receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); } + private void doFlush( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(flush.typeId(), flush.buffer(), flush.offset(), flush.sizeof()); + } + private void doEnd( MessageConsumer receiver, long originId, @@ -592,6 +660,21 @@ int decode( int limit); } + @FunctionalInterface + private interface KafkaDescribeClientDecoder + { + int decode( + DescribeClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int progress, + int limit); + } + @FunctionalInterface private interface KafkaGroupCoordinatorClientDecoder { @@ -607,6 +690,117 @@ int decode( int limit); } + private int decodeDescribeResponse( + DescribeClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final ResponseHeaderFW responseHeader = 
responseHeaderRO.tryWrap(buffer, progress, limit); + if (responseHeader == null) + { + client.decoder = decodeIgnoreAll; + break decode; + } + + final int responseSize = responseHeader.length(); + + if (length >= responseHeader.sizeof() + responseSize) + { + progress = responseHeader.limit(); + + final DescribeConfigsResponseFW describeConfigsResponse = + describeConfigsResponseRO.tryWrap(buffer, progress, limit); + + if (describeConfigsResponse == null) + { + client.decoder = decodeIgnoreAll; + break decode; + } + + progress = describeConfigsResponse.limit(); + + final int resourceCount = describeConfigsResponse.resourceCount(); + for (int resourceIndex = 0; resourceIndex < resourceCount; resourceIndex++) + { + final ResourceResponseFW resource = resourceResponseRO.tryWrap(buffer, progress, limit); + if (resource == null) + { + client.decoder = decodeIgnoreAll; + break decode; + } + + progress = resource.limit(); + + final String resourceName = resource.name().asString(); + final int resourceError = resource.errorCode(); + + client.onDecodeResource(traceId, client.authorization, resourceError, resourceName); + // TODO: use different decoder for configs + if (resourceError != ERROR_NONE || !client.delegate.nodeId.equals(resourceName)) + { + client.decoder = decodeIgnoreAll; + break decode; + } + + final int configCount = resource.configCount(); + configs.clear(); + for (int configIndex = 0; configIndex < configCount; configIndex++) + { + final ConfigResponseFW config = configResponseRO.tryWrap(buffer, progress, limit); + if (config == null) + { + client.decoder = decodeIgnoreAll; + break decode; + } + + progress = config.limit(); + + final String name = config.name().asString(); + final String value = config.value().asString(); + + configs.put(name, value); + } + + client.onDecodeDescribeResponse(traceId, configs); + } + } + } + + if (client.decoder == decodeIgnoreAll) + { + client.cleanupNetwork(traceId); + } + + return progress; + } + + private int decodeReject( 
+ DescribeClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + client.doNetworkResetIfNecessary(traceId); + client.decoder = decodeIgnoreAll; + return limit; + } + private int decodeFindCoordinatorResponse( ClusterClient client, long traceId, @@ -650,7 +844,7 @@ else if (findCoordinatorResponse.errorCode() == ERROR_COORDINATOR_NOT_AVAILABLE) } else if (findCoordinatorResponse.errorCode() == ERROR_NONE) { - client.onFindCoordinator(traceId, authorization, + client.onFindCoordinator(traceId, authorization, findCoordinatorResponse.nodeId(), findCoordinatorResponse.host(), findCoordinatorResponse.port()); } else @@ -787,7 +981,8 @@ else if (errorCode == ERROR_NONE) final MemberMetadataFW memberMetadata = memberMetadataRO.tryWrap(buffer, progress, limit); if (memberMetadata != null) { - client.members.add(memberMetadata.memberId().asString()); + client.members.add(new MemberProtocol( + memberMetadata.memberId().asString(), memberMetadata.metadata())); progress = memberMetadata.limit(); } else @@ -797,7 +992,7 @@ else if (errorCode == ERROR_NONE) } client.onJoinGroupResponse(traceId, authorization, joinGroupResponse.leader().asString(), - joinGroupResponse.memberId().asString(), errorCode); + joinGroupResponse.memberId().asString()); } else { @@ -1016,11 +1211,12 @@ private final class KafkaGroupStream { private final MessageConsumer application; private final ClusterClient clusterClient; + private final DescribeClient describeClient; private final CoordinatorClient coordinatorClient; private final GroupMembership groupMembership; private final String groupId; private final String protocol; - private final int timeout; + private int timeout; private final long originId; private final long routedId; private final long initialId; @@ -1028,6 +1224,10 @@ private final class KafkaGroupStream private final long affinity; private final long resolvedId; private final 
KafkaSaslConfig sasl; + public String host; + public int port; + private String nodeId; + private MutableDirectBuffer metadataBuffer; private int state; @@ -1041,6 +1241,7 @@ private final class KafkaGroupStream private int replyPad; private long replyBudgetId; + private int topicMetadataLimit; KafkaGroupStream( MessageConsumer application, @@ -1068,7 +1269,9 @@ private final class KafkaGroupStream this.groupMembership = groupMembership; this.sasl = sasl; this.clusterClient = new ClusterClient(routedId, resolvedId, sasl, this); + this.describeClient = new DescribeClient(routedId, resolvedId, sasl, this); this.coordinatorClient = new CoordinatorClient(routedId, resolvedId, sasl, this); + this.metadataBuffer = new UnsafeBuffer(new byte[2048]); } private void onApplication( @@ -1117,6 +1320,22 @@ private void onApplicationBegin( { final long traceId = begin.traceId(); final long authorization = begin.authorization(); + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()); + final KafkaBeginExFW kafkaBeginEx = beginEx != null && beginEx.typeId() == kafkaTypeId ? 
+ kafkaBeginExRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()) : null; + + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_GROUP; + final KafkaGroupBeginExFW kafkaGroupBeginEx = kafkaBeginEx.group(); + + OctetsFW metadata = kafkaGroupBeginEx.metadata(); + final int metadataSize = kafkaGroupBeginEx.metadataLen(); + + if (metadataSize > 0) + { + metadataBuffer.putBytes(0, metadata.buffer(), metadata.offset(), kafkaGroupBeginEx.metadataLen()); + topicMetadataLimit += metadataSize; + } state = KafkaState.openingInitial(state); @@ -1221,7 +1440,10 @@ private void doApplicationBegin( final KafkaBeginExFW kafkaBeginEx = kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) .typeId(kafkaTypeId) - .group(g -> g.groupId(groupId).protocol(protocol).timeout(timeout)) + .group(g -> g + .groupId(groupId) + .protocol(protocol) + .timeout(timeout)) .build(); doBegin(application, originId, routedId, replyId, replySeq, replyAck, replyMax, @@ -1231,8 +1453,7 @@ private void doApplicationBegin( private void doApplicationData( long traceId, long authorization, - OctetsFW payload, - Consumer extension) + OctetsFW payload) { final int reserved = replyPad; @@ -1240,12 +1461,12 @@ private void doApplicationData( { doData(application, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization, replyBudgetId, reserved, - payload.value(), payload.offset(), payload.sizeof(), extension); + payload.value(), payload.offset(), payload.sizeof(), EMPTY_EXTENSION); } else { - doDataNull(application, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, replyBudgetId, reserved, extension); + doDataEmpty(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, reserved, EMPTY_EXTENSION); } replySeq += reserved; @@ -1253,6 +1474,20 @@ private void doApplicationData( assert replyAck <= replySeq; } + private void doApplicationFlush( + long 
traceId, + long authorization, + Flyweight extension) + { + if (!KafkaState.replyClosed(state)) + { + final int reserved = replyPad; + + doFlush(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, reserved, extension); + } + } + private void doApplicationEnd( long traceId) { @@ -1338,7 +1573,8 @@ private void cleanupApplication( long traceId, int error) { - final KafkaResetExFW kafkaResetEx = kafkaResetExRW.wrap(extBuffer, 0, extBuffer.capacity()) + final KafkaResetExFW kafkaResetEx = kafkaResetExRW.wrap(writeBuffer, + ResetFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) .typeId(kafkaTypeId) .error(error) .build(); @@ -1980,12 +2216,17 @@ private void onCoordinatorNotAvailable( private void onFindCoordinator( long traceId, long authorization, + int nodeId, String16FW host, int port) { nextResponseId++; - delegate.coordinatorClient.doNetworkBeginIfNecessary(traceId, authorization, 0, host, port); + delegate.nodeId = String.valueOf(nodeId); + delegate.host = host.asString(); + delegate.port = port; + + delegate.describeClient.doNetworkBegin(traceId, authorization, 0); cleanupNetwork(traceId, authorization); } @@ -2033,18 +2274,15 @@ private void cleanupEncodeSlotIfNecessary() } } - private final class CoordinatorClient extends KafkaSaslClient + private final class DescribeClient extends KafkaSaslClient { private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest; private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest; - private final LongLongConsumer encodeJoinGroupRequest = this::doEncodeJoinGroupRequest; - private final LongLongConsumer encodeSyncGroupRequest = this::doEncodeSyncGroupRequest; - private final LongLongConsumer encodeHeartbeatRequest = this::doEncodeHeartbeatRequest; - private final LongLongConsumer encodeLeaveGroupRequest = this::doEncodeLeaveGroupRequest; - private final List members; - private final 
KafkaGroupStream delegate; + private final LongLongConsumer encodeDescribeRequest = this::doEncodeDescribeRequest; private MessageConsumer network; + private final Map configs; + private final KafkaGroupStream delegate; private int state; private long authorization; @@ -2068,27 +2306,47 @@ private final class CoordinatorClient extends KafkaSaslClient private int decodeSlotReserved; private int nextResponseId; - private long heartbeatRequestId = NO_CANCEL_ID; - private String leader; - - private int generationId; - private KafkaGroupCoordinatorClientDecoder decoder; + private KafkaDescribeClientDecoder decoder; private LongLongConsumer encoder; - private OctetsFW assignment = EMPTY_OCTETS; - CoordinatorClient( + DescribeClient( long originId, long routedId, KafkaSaslConfig sasl, KafkaGroupStream delegate) { super(sasl, originId, routedId); - - this.encoder = sasl != null ? encodeSaslHandshakeRequest : encodeJoinGroupRequest; + this.configs = new LinkedHashMap<>(); this.delegate = delegate; - this.decoder = decodeCoordinatorReject; - this.members = new ArrayList<>(); + + this.encoder = sasl != null ? 
encodeSaslHandshakeRequest : encodeDescribeRequest; + this.decoder = decodeReject; + + this.configs.put(GROUP_MIN_SESSION_TIMEOUT, null); + this.configs.put(GROUP_MAX_SESSION_TIMEOUT, null); + } + + public void onDecodeResource( + long traceId, + long authorization, + int errorCode, + String resource) + { + switch (errorCode) + { + case ERROR_NONE: + assert resource.equals(delegate.nodeId); + break; + default: + final KafkaResetExFW resetEx = kafkaResetExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .error(errorCode) + .build(); + delegate.cleanupApplication(traceId, resetEx); + doNetworkEnd(traceId, authorization); + break; + } } private void onNetwork( @@ -2161,7 +2419,7 @@ private void onNetworkData( if (replySeq > replyAck + replyMax) { - onError(traceId); + cleanupNetwork(traceId); } else { @@ -2172,7 +2430,7 @@ private void onNetworkData( if (decodeSlot == NO_SLOT) { - onError(traceId); + cleanupNetwork(traceId); } else { @@ -2204,9 +2462,13 @@ private void onNetworkEnd( cleanupDecodeSlotIfNecessary(); - if (!delegate.isApplicationReplyOpen()) + if (!KafkaState.replyOpened(delegate.state)) { - onError(traceId); + cleanupNetwork(traceId); + } + else if (decodeSlot == NO_SLOT) + { + delegate.doApplicationEnd(traceId); } } @@ -2217,7 +2479,7 @@ private void onNetworkAbort( state = KafkaState.closedReply(state); - onError(traceId); + cleanupNetwork(traceId); } private void onNetworkReset( @@ -2227,7 +2489,7 @@ private void onNetworkReset( state = KafkaState.closedInitial(state); - onError(traceId); + cleanupNetwork(traceId); } private void onNetworkWindow( @@ -2279,12 +2541,10 @@ private void onNetworkSignal( } } - private void doNetworkBeginIfNecessary( + private void doNetworkBegin( long traceId, long authorization, - long affinity, - String16FW host, - int port) + long affinity) { if (KafkaState.closed(state)) { @@ -2293,36 +2553,13 @@ private void doNetworkBeginIfNecessary( state = 0; } - if (!KafkaState.initialOpening(state)) - { - 
doNetworkBegin(traceId, authorization, affinity, host, port); - } - } - - private void doNetworkBegin( - long traceId, - long authorization, - long affinity, - String16FW host, - int port) - { this.initialId = supplyInitialId.applyAsLong(routedId); this.replyId = supplyReplyId.applyAsLong(initialId); state = KafkaState.openingInitial(state); - Consumer extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) - .typeId(proxyTypeId) - .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) - .source("0.0.0.0") - .destination(host) - .sourcePort(0) - .destinationPort(port))) - .build() - .sizeof()); - network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, extension); + traceId, authorization, affinity, EMPTY_EXTENSION); } @Override @@ -2352,23 +2589,18 @@ private void doNetworkEnd( long traceId, long authorization) { - if (!KafkaState.initialClosed(state)) - { - state = KafkaState.closedInitial(state); - - doEnd(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, EMPTY_EXTENSION); - } + state = KafkaState.closedInitial(state); cleanupEncodeSlotIfNecessary(); + doEnd(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); } - private void doNetworkAbort( + private void doNetworkAbortIfNecessary( long traceId) { - if (KafkaState.initialOpened(state) && - !KafkaState.initialClosed(state)) + if (!KafkaState.initialClosed(state)) { doAbort(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization, EMPTY_EXTENSION); @@ -2378,7 +2610,7 @@ private void doNetworkAbort( cleanupEncodeSlotIfNecessary(); } - private void doNetworkReset( + private void doNetworkResetIfNecessary( long traceId) { if (!KafkaState.replyClosed(state)) @@ -2424,10 +2656,15 @@ private void doEncodeRequestIfNecessary( } } - private void doEncodeJoinGroupRequest( 
+ private void doEncodeDescribeRequest( long traceId, long budgetId) { + if (KafkaConfiguration.DEBUG) + { + System.out.format("[client] %s DESCRIBE\n", delegate.nodeId); + } + final MutableDirectBuffer encodeBuffer = writeBuffer; final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; final int encodeLimit = encodeBuffer.capacity(); @@ -2436,36 +2673,37 @@ private void doEncodeJoinGroupRequest( final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit) .length(0) - .apiKey(JOIN_GROUP_API_KEY) - .apiVersion(JOIN_GROUP_VERSION) + .apiKey(DESCRIBE_CONFIGS_API_KEY) + .apiVersion(DESCRIBE_CONFIGS_API_VERSION) .correlationId(0) - .clientId(clientId) + .clientId((String) null) .build(); encodeProgress = requestHeader.limit(); - final String memberId = delegate.groupMembership.memberIds.getOrDefault(delegate.groupId, UNKNOWN_MEMBER_ID); - - final JoinGroupRequestFW joinGroupRequest = - joinGroupRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) - .groupId(delegate.groupId) - .sessionTimeoutMillis(delegate.timeout) - .rebalanceTimeoutMillis((int) rebalanceTimeout.toMillis()) - .memberId(memberId) - .groupInstanceId(delegate.groupMembership.instanceId) - .protocolType("consumer") - .protocolCount(1) + final DescribeConfigsRequestFW describeConfigsRequest = + describeConfigsRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .resourceCount(1) .build(); - encodeProgress = joinGroupRequest.limit(); + encodeProgress = describeConfigsRequest.limit(); - final ProtocolMetadataFW protocolMetadata = - protocolMetadataRW.wrap(encodeBuffer, encodeProgress, encodeLimit) - .name(delegate.protocol) - .metadata(EMPTY_OCTETS) + final ResourceRequestFW resourceRequest = resourceRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .type(RESOURCE_TYPE_BROKER) + .name(delegate.nodeId) + .configNamesCount(configs.size()) + .build(); + + encodeProgress = resourceRequest.limit(); + + for (String config : configs.keySet()) + { + final 
String16FW configName = configNameRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .set(config, UTF_8) .build(); - encodeProgress = protocolMetadata.limit(); + encodeProgress = configName.limit(); + } final int requestId = nextRequestId++; final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; @@ -2480,14 +2718,637 @@ private void doEncodeJoinGroupRequest( doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); - decoder = decodeJoinGroupResponse; - - delegate.doApplicationBeginIfNecessary(traceId, authorization); + decoder = decodeDescribeResponse; } - private void doEncodeSyncGroupRequest( + private void encodeNetwork( long traceId, - long budgetId) + long authorization, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + final int maxLength = limit - offset; + final int initialWin = initialMax - (int)(initialSeq - initialAck); + final int length = Math.max(Math.min(initialWin - initialPad, maxLength), 0); + + if (length > 0) + { + final int reserved = length + initialPad; + + doData(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_EXTENSION); + + initialSeq += reserved; + + assert initialAck <= initialSeq; + } + + final int remaining = maxLength - length; + if (remaining > 0) + { + if (encodeSlot == NO_SLOT) + { + encodeSlot = encodePool.acquire(initialId); + } + + if (encodeSlot == NO_SLOT) + { + cleanupNetwork(traceId); + } + else + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(0, buffer, offset + length, remaining); + encodeSlotOffset = remaining; + } + } + else + { + cleanupEncodeSlotIfNecessary(); + } + } + + private void decodeNetwork( + long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int limit) + { + KafkaDescribeClientDecoder previous = null; + int 
progress = offset; + while (progress <= limit && previous != decoder) + { + previous = decoder; + progress = decoder.decode(this, traceId, authorization, budgetId, reserved, buffer, offset, progress, limit); + } + + if (progress < limit) + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = decodePool.acquire(initialId); + } + + if (decodeSlot == NO_SLOT) + { + cleanupNetwork(traceId); + } + else + { + final MutableDirectBuffer decodeBuffer = decodePool.buffer(decodeSlot); + decodeBuffer.putBytes(0, buffer, progress, limit - progress); + decodeSlotOffset = limit - progress; + decodeSlotReserved = (limit - progress) * reserved / (limit - offset); + } + + doNetworkWindow(traceId, budgetId, decodeSlotOffset, 0, replyMax); + } + else + { + cleanupDecodeSlotIfNecessary(); + + if (reserved > 0) + { + doNetworkWindow(traceId, budgetId, 0, 0, replyMax); + } + } + } + + @Override + protected void doDecodeSaslHandshakeResponse( + long traceId) + { + decoder = decodeSaslHandshakeResponse; + } + + @Override + protected void doDecodeSaslHandshake( + long traceId) + { + decoder = decodeSaslHandshake; + } + + @Override + protected void doDecodeSaslHandshakeMechanisms( + long traceId) + { + decoder = decodeSaslHandshakeMechanisms; + } + + @Override + protected void doDecodeSaslHandshakeMechansim( + long traceId) + { + decoder = decodeSaslHandshakeMechanism; + } + + @Override + protected void doDecodeSaslAuthenticateResponse( + long traceId) + { + decoder = decodeSaslAuthenticateResponse; + } + + @Override + protected void doDecodeSaslAuthenticate( + long traceId) + { + decoder = decodeSaslAuthenticate; + } + + @Override + protected void onDecodeSaslHandshakeResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + encoder = encodeSaslAuthenticateRequest; + decoder = decodeSaslAuthenticateResponse; + break; + default: + delegate.cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + 
@Override + protected void onDecodeSaslAuthenticateResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + encoder = encodeDescribeRequest; + decoder = decodeDescribeResponse; + break; + default: + delegate.cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + @Override + protected void onDecodeSaslResponse( + long traceId) + { + nextResponseId++; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onDecodeDescribeResponse( + long traceId, + Map newConfigs) + { + nextResponseId++; + + int timeoutMin = Integer.valueOf(newConfigs.get(GROUP_MIN_SESSION_TIMEOUT)).intValue(); + int timeoutMax = Integer.valueOf(newConfigs.get(GROUP_MAX_SESSION_TIMEOUT)).intValue(); + if (delegate.timeout < timeoutMin) + { + delegate.timeout = timeoutMin; + } + else if (delegate.timeout > timeoutMax) + { + delegate.timeout = timeoutMax; + } + + delegate.coordinatorClient.doNetworkBeginIfNecessary(traceId, authorization, 0); + + cleanupNetwork(traceId); + } + + private void cleanupNetwork( + long traceId) + { + doNetworkResetIfNecessary(traceId); + doNetworkAbortIfNecessary(traceId); + } + + private void cleanupDecodeSlotIfNecessary() + { + if (decodeSlot != NO_SLOT) + { + decodePool.release(decodeSlot); + decodeSlot = NO_SLOT; + decodeSlotOffset = 0; + decodeSlotReserved = 0; + } + } + + private void cleanupEncodeSlotIfNecessary() + { + if (encodeSlot != NO_SLOT) + { + encodePool.release(encodeSlot); + encodeSlot = NO_SLOT; + encodeSlotOffset = 0; + encodeSlotTraceId = 0; + } + } + } + + private final class CoordinatorClient extends KafkaSaslClient + { + private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest; + private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest; + private final LongLongConsumer encodeJoinGroupRequest = this::doEncodeJoinGroupRequest; 
+ private final LongLongConsumer encodeSyncGroupRequest = this::doEncodeSyncGroupRequest; + private final LongLongConsumer encodeHeartbeatRequest = this::doEncodeHeartbeatRequest; + private final LongLongConsumer encodeLeaveGroupRequest = this::doEncodeLeaveGroupRequest; + private final List members; + private final KafkaGroupStream delegate; + + private MessageConsumer network; + + private int state; + private long authorization; + + private long initialSeq; + private long initialAck; + private int initialMax; + private int initialPad; + private long initialBudgetId; + + private long replySeq; + private long replyAck; + private int replyMax; + + private int encodeSlot = NO_SLOT; + private int encodeSlotOffset; + private long encodeSlotTraceId; + + private int decodeSlot = NO_SLOT; + private int decodeSlotOffset; + private int decodeSlotReserved; + + private int nextResponseId; + private long heartbeatRequestId = NO_CANCEL_ID; + + private String leader; + + private int generationId; + private KafkaGroupCoordinatorClientDecoder decoder; + private LongLongConsumer encoder; + private OctetsFW assignment = EMPTY_OCTETS; + + CoordinatorClient( + long originId, + long routedId, + KafkaSaslConfig sasl, + KafkaGroupStream delegate) + { + super(sasl, originId, routedId); + + this.encoder = sasl != null ? 
encodeSaslHandshakeRequest : encodeJoinGroupRequest; + this.delegate = delegate; + this.decoder = decodeCoordinatorReject; + this.members = new ArrayList<>(); + } + + private void onNetwork( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onNetworkBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onNetworkData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onNetworkEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onNetworkAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onNetworkReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onNetworkWindow(window); + break; + case SignalFW.TYPE_ID: + final SignalFW signal = signalRO.wrap(buffer, index, index + length); + onNetworkSignal(signal); + break; + default: + break; + } + } + + private void onNetworkBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + authorization = begin.authorization(); + state = KafkaState.openingReply(state); + + doNetworkWindow(traceId, 0L, 0, 0, decodePool.slotCapacity()); + } + + private void onNetworkData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long budgetId = data.budgetId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + data.reserved(); + authorization = data.authorization(); + + assert replyAck <= replySeq; + + if (replySeq > replyAck + replyMax) + { + onError(traceId); + } + else + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = 
decodePool.acquire(initialId); + } + + if (decodeSlot == NO_SLOT) + { + onError(traceId); + } + else + { + final OctetsFW payload = data.payload(); + int reserved = data.reserved(); + int offset = payload.offset(); + int limit = payload.limit(); + + final MutableDirectBuffer buffer = decodePool.buffer(decodeSlot); + buffer.putBytes(decodeSlotOffset, payload.buffer(), offset, limit - offset); + decodeSlotOffset += limit - offset; + decodeSlotReserved += reserved; + + offset = 0; + limit = decodeSlotOffset; + reserved = decodeSlotReserved; + + decodeNetwork(traceId, authorization, budgetId, reserved, buffer, offset, limit); + } + } + } + + private void onNetworkEnd( + EndFW end) + { + final long traceId = end.traceId(); + + state = KafkaState.closedReply(state); + + cleanupDecodeSlotIfNecessary(); + + if (!delegate.isApplicationReplyOpen()) + { + onError(traceId); + } + } + + private void onNetworkAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedReply(state); + + onError(traceId); + } + + private void onNetworkReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + onError(traceId); + } + + private void onNetworkWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= initialSeq; + assert acknowledge >= initialAck; + assert maximum + acknowledge >= initialMax + initialAck; + + this.initialAck = acknowledge; + this.initialMax = maximum; + this.initialPad = padding; + this.initialBudgetId = budgetId; + + assert initialAck <= initialSeq; + + this.authorization = window.authorization(); + + state = KafkaState.openedInitial(state); + + if (encodeSlot != NO_SLOT) + { + final 
MutableDirectBuffer buffer = encodePool.buffer(encodeSlot); + final int limit = encodeSlotOffset; + + encodeNetwork(encodeSlotTraceId, authorization, budgetId, buffer, 0, limit); + } + + doEncodeRequestIfNecessary(traceId, budgetId); + } + + private void onNetworkSignal( + SignalFW signal) + { + final long traceId = signal.traceId(); + final int signalId = signal.signalId(); + + if (signalId == SIGNAL_NEXT_REQUEST) + { + doEncodeRequestIfNecessary(traceId, initialBudgetId); + } + } + + private void doNetworkBeginIfNecessary( + long traceId, + long authorization, + long affinity) + { + if (KafkaState.closed(state)) + { + replyAck = 0; + replySeq = 0; + state = 0; + } + + if (!KafkaState.initialOpening(state)) + { + doNetworkBegin(traceId, authorization, affinity); + } + } + + private void doNetworkBegin( + long traceId, + long authorization, + long affinity) + { + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + + state = KafkaState.openingInitial(state); + + Consumer extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) + .typeId(proxyTypeId) + .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) + .source("0.0.0.0") + .destination(delegate.host) + .sourcePort(0) + .destinationPort(delegate.port))) + .build() + .sizeof()); + + network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, extension); + } + + @Override + protected void doNetworkData( + long traceId, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + if (encodeSlot != NO_SLOT) + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(encodeSlotOffset, buffer, offset, limit - offset); + encodeSlotOffset += limit - offset; + encodeSlotTraceId = traceId; + + buffer = encodeBuffer; + offset = 0; + limit = encodeSlotOffset; + } + + encodeNetwork(traceId, authorization, budgetId, buffer, 
offset, limit); + } + + private void doNetworkEnd( + long traceId, + long authorization) + { + if (!KafkaState.initialClosed(state)) + { + state = KafkaState.closedInitial(state); + + doEnd(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + } + + cleanupEncodeSlotIfNecessary(); + + } + + private void doNetworkAbort( + long traceId) + { + if (KafkaState.initialOpened(state) && + !KafkaState.initialClosed(state)) + { + doAbort(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + state = KafkaState.closedInitial(state); + } + + cleanupEncodeSlotIfNecessary(); + } + + private void doNetworkReset( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_OCTETS); + state = KafkaState.closedReply(state); + } + + cleanupDecodeSlotIfNecessary(); + } + + private void doNetworkWindow( + long traceId, + long budgetId, + int minReplyNoAck, + int minReplyPad, + int minReplyMax) + { + final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck); + + if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state)) + { + replyAck = newReplyAck; + assert replyAck <= replySeq; + + replyMax = minReplyMax; + + state = KafkaState.openedReply(state); + + doWindow(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, minReplyPad); + } + } + + private void doEncodeRequestIfNecessary( + long traceId, + long budgetId) + { + if (nextRequestId == nextResponseId) + { + encoder.accept(traceId, budgetId); + } + } + + private void doEncodeJoinGroupRequest( + long traceId, + long budgetId) { final MutableDirectBuffer encodeBuffer = writeBuffer; final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; @@ -2496,6 +3357,67 @@ private void doEncodeSyncGroupRequest( int 
encodeProgress = encodeOffset; final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .length(0) + .apiKey(JOIN_GROUP_API_KEY) + .apiVersion(JOIN_GROUP_VERSION) + .correlationId(0) + .clientId(clientId) + .build(); + + encodeProgress = requestHeader.limit(); + + final String memberId = delegate.groupMembership.memberIds.getOrDefault(delegate.groupId, UNKNOWN_MEMBER_ID); + + final JoinGroupRequestFW joinGroupRequest = + joinGroupRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .groupId(delegate.groupId) + .sessionTimeoutMillis(delegate.timeout) + .rebalanceTimeoutMillis((int) rebalanceTimeout.toMillis()) + .memberId(memberId) + .groupInstanceId(delegate.groupMembership.instanceId) + .protocolType("consumer") + .protocolCount(1) + .build(); + + encodeProgress = joinGroupRequest.limit(); + + final ProtocolMetadataFW protocolMetadata = + protocolMetadataRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .name(delegate.protocol) + .metadata(delegate.metadataBuffer, 0, delegate.topicMetadataLimit) + .build(); + + encodeProgress = protocolMetadata.limit(); + + final int requestId = nextRequestId++; + final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; + + requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit()) + .length(requestSize) + .apiKey(requestHeader.apiKey()) + .apiVersion(requestHeader.apiVersion()) + .correlationId(requestId) + .clientId(requestHeader.clientId().asString()) + .build(); + + doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); + + decoder = decodeJoinGroupResponse; + + delegate.doApplicationBeginIfNecessary(traceId, authorization); + } + + private void doEncodeSyncGroupRequest( + long traceId, + long budgetId) + { + final MutableDirectBuffer encodeBuffer = writeBuffer; + final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; + final int encodeLimit = encodeBuffer.capacity(); + + MutableInteger 
encodeProgress = new MutableInteger(encodeOffset); + + final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress.get(), encodeLimit) .length(0) .apiKey(SYNC_GROUP_API_KEY) .apiVersion(SYNC_GROUP_VERSION) @@ -2503,39 +3425,55 @@ private void doEncodeSyncGroupRequest( .clientId(clientId) .build(); - encodeProgress = requestHeader.limit(); + encodeProgress.set(requestHeader.limit()); final String memberId = delegate.groupMembership.memberIds.get(delegate.groupId); - final boolean isLeader = leader.equals(memberId); - final SyncGroupRequestFW syncGroupRequest = - syncGroupRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + syncGroupRequestRW.wrap(encodeBuffer, encodeProgress.get(), encodeLimit) .groupId(delegate.groupId) .generatedId(generationId) .memberId(memberId) .groupInstanceId(delegate.groupMembership.instanceId) - .assignmentCount(isLeader ? members.size() : 0) + .assignmentCount(members.size()) .build(); - encodeProgress = syncGroupRequest.limit(); + encodeProgress.set(syncGroupRequest.limit()); - if (isLeader) + if (assignment.sizeof() > 0) { - for (int i = 0; i < members.size(); i++) + Array32FW assignments = memberAssignmentRO + .wrap(assignment.buffer(), assignment.offset(), assignment.limit()); + + assignments.forEach(a -> { + Array32FW topicPartitions = a.assignments(); final AssignmentFW groupAssignment = - assignmentRW.wrap(encodeBuffer, encodeProgress, encodeLimit) - .memberId(members.get(i)) - .value(assignment) + assignmentRW.wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .memberId(a.memberId()) + .value(topicPartitions.buffer(), topicPartitions.offset(), topicPartitions.length()) .build(); - encodeProgress = groupAssignment.limit(); - } + encodeProgress.set(groupAssignment.limit()); + }); + } + else + { + members.forEach(m -> + { + final AssignmentFW groupAssignment = + assignmentRW.wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .memberId(m.memberId) + .value(m.metadata) + .build(); + + 
encodeProgress.set(groupAssignment.limit()); + }); } + final int requestId = nextRequestId++; - final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; + final int requestSize = encodeProgress.get() - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit()) .length(requestSize) @@ -2545,7 +3483,7 @@ private void doEncodeSyncGroupRequest( .clientId(requestHeader.clientId().asString()) .build(); - doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); + doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress.get()); decoder = decodeSyncGroupResponse; } @@ -2886,16 +3824,6 @@ private void onNotCoordinatorError( delegate.onNotCoordinatorError(traceId, authorization); } - private void onJoinGroupUnknownMemberError( - long traceId, - long authorization) - { - nextResponseId++; - - delegate.groupMembership.memberIds.put(delegate.groupId, UNKNOWN_MEMBER_ID); - signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); - } - private void onJoinGroupMemberIdError( long traceId, long authorization, @@ -2910,18 +3838,36 @@ private void onJoinGroupMemberIdError( private void onJoinGroupResponse( long traceId, long authorization, - String leader, - String memberId, - int error) + String leaderId, + String memberId) { nextResponseId++; - this.leader = leader; + this.leader = leaderId; delegate.groupMembership.memberIds.put(delegate.groupId, memberId); + final KafkaFlushExFW kafkaFlushEx = + kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .group(g -> g.leaderId(leaderId) + .memberId(memberId) + .members(gm -> members.forEach(m -> + gm.item(i -> + { + KafkaGroupMemberFW.Builder member = i.id(m.memberId); + if (m.metadata.sizeof() > 0) + { + member.metadataLen(m.metadata.sizeof()) + .metadata(m.metadata) + .build(); + } + })))) + .build(); + + 
delegate.doApplicationFlush(traceId, authorization, kafkaFlushEx); + encoder = encodeSyncGroupRequest; - signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); } private void onSynGroupRebalance( @@ -2941,16 +3887,9 @@ private void onSyncGroupResponse( { nextResponseId++; - final String memberId = delegate.groupMembership.memberIds.get(delegate.groupId); - - delegate.doApplicationData(traceId, authorization, assignment, - ex -> ex.set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) - .typeId(kafkaTypeId) - .group(g -> g.leaderId(leader).memberId(memberId).members(members.size())) - .build() - .sizeof())); + delegate.doApplicationData(traceId, authorization, assignment); - if (heartbeatRequestId != NO_CANCEL_ID) + if (heartbeatRequestId == NO_CANCEL_ID) { encoder = encodeHeartbeatRequest; @@ -3052,4 +3991,19 @@ private final class GroupMembership this.memberIds = new Object2ObjectHashMap<>(); } } + + private final class MemberProtocol + { + private final String memberId; + private final OctetsFW metadata; + + MemberProtocol( + String memberId, + OctetsFW metadata) + { + + this.memberId = memberId; + this.metadata = metadata; + } + } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java new file mode 100644 index 0000000000..b072755903 --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java @@ -0,0 +1,1644 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT; +import static java.util.Objects.requireNonNull; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.collections.Int2ObjectHashMap; +import org.agrona.collections.LongLongConsumer; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.RequestHeaderFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.ResponseHeaderFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.OffsetFetchPartitionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.OffsetFetchRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.OffsetFetchResponseFW; +import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.OffsetFetchTopicRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.OffsetFetchTopicResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.PartitionIndexFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaOffsetFetchBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.SignalFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; +import io.aklivity.zilla.runtime.engine.concurrent.Signaler; + + +public final class KafkaClientOffsetFetchFactory extends KafkaClientSaslHandshaker implements BindingHandler +{ + private static final int ERROR_NONE = 0; + + private static final int SIGNAL_NEXT_REQUEST = 1; + + private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer(); + private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0); + private static final 
Consumer EMPTY_EXTENSION = ex -> {}; + + private static final short OFFSET_FETCH_API_KEY = 9; + private static final short OFFSET_FETCH_API_VERSION = 0; + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final AbortFW abortRO = new AbortFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new WindowFW(); + private final SignalFW signalRO = new SignalFW(); + private final ExtensionFW extensionRO = new ExtensionFW(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); + private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); + private final KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder(); + + private final RequestHeaderFW.Builder requestHeaderRW = new RequestHeaderFW.Builder(); + private final OffsetFetchRequestFW.Builder offsetFetchRequestRW = new OffsetFetchRequestFW.Builder(); + private final OffsetFetchTopicRequestFW.Builder offsetFetchTopicRequestRW = new OffsetFetchTopicRequestFW.Builder(); + private final PartitionIndexFW.Builder partitionIndexRW = new PartitionIndexFW.Builder(); + + private final ResponseHeaderFW responseHeaderRO = new ResponseHeaderFW(); + private final OffsetFetchResponseFW offsetFetchResponseRO = new OffsetFetchResponseFW(); + private final OffsetFetchTopicResponseFW offsetFetchTopicResponseRO = new OffsetFetchTopicResponseFW(); + private final OffsetFetchPartitionFW 
offsetFetchPartitionRO = new OffsetFetchPartitionFW(); + + private final KafkaOffsetFetchClientDecoder decodeSaslHandshakeResponse = this::decodeSaslHandshakeResponse; + private final KafkaOffsetFetchClientDecoder decodeSaslHandshake = this::decodeSaslHandshake; + private final KafkaOffsetFetchClientDecoder decodeSaslHandshakeMechanisms = this::decodeSaslHandshakeMechanisms; + private final KafkaOffsetFetchClientDecoder decodeSaslHandshakeMechanism = this::decodeSaslHandshakeMechanism; + private final KafkaOffsetFetchClientDecoder decodeSaslAuthenticateResponse = this::decodeSaslAuthenticateResponse; + private final KafkaOffsetFetchClientDecoder decodeSaslAuthenticate = this::decodeSaslAuthenticate; + private final KafkaOffsetFetchClientDecoder decodeOffsetFetchResponse = this::decodeOffsetFetchResponse; + private final KafkaOffsetFetchClientDecoder decodeOffsetFetchTopics = this::decodeOffsetFetchTopics; + private final KafkaOffsetFetchClientDecoder decodeOffsetFetchTopic = this::decodeOffsetFetchTopic; + private final KafkaOffsetFetchClientDecoder decodeOffsetFetchPartitions = this::decodeOffsetFetchPartitions; + private final KafkaOffsetFetchClientDecoder decodeOffsetFetchPartition = this::decodeOffsetFetchPartition; + + private final KafkaOffsetFetchClientDecoder decodeIgnoreAll = this::decodeIgnoreAll; + private final KafkaOffsetFetchClientDecoder decodeReject = this::decodeReject; + + private final int kafkaTypeId; + private final MutableDirectBuffer writeBuffer; + private final MutableDirectBuffer extBuffer; + private final BufferPool decodePool; + private final BufferPool encodePool; + private final Signaler signaler; + private final BindingHandler streamFactory; + private final LongFunction supplyBinding; + private final LongFunction supplyClientRoute; + + public KafkaClientOffsetFetchFactory( + KafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding, + LongFunction supplyClientRoute) + { + super(config, context); + 
this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.signaler = context.signaler(); + this.streamFactory = context.streamFactory(); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.decodePool = context.bufferPool(); + this.encodePool = context.bufferPool(); + this.supplyBinding = supplyBinding; + this.supplyClientRoute = supplyClientRoute; + } + + @Override + public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer application) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long affinity = begin.affinity(); + final long authorization = begin.authorization(); + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()); + final KafkaBeginExFW kafkaBeginEx = beginEx != null && beginEx.typeId() == kafkaTypeId ? + kafkaBeginExRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()) : null; + + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_OFFSET_FETCH; + final KafkaOffsetFetchBeginExFW kafkaOffsetFetchBeginEx = kafkaBeginEx.offsetFetch(); + final String groupId = kafkaOffsetFetchBeginEx.groupId().asString(); + List topics = new ArrayList<>(); + kafkaOffsetFetchBeginEx.topics().forEach(t -> + { + List partitions = new ArrayList<>(); + t.partitions().forEach(p -> partitions.add(p.partitionId())); + topics.add(new KafkaOffsetFetchTopic(t.topic().asString(), partitions)); + }); + + + MessageConsumer newStream = null; + + final KafkaBindingConfig binding = supplyBinding.apply(routedId); + final KafkaRouteConfig resolved = binding != null ? 
+ binding.resolve(authorization, null, groupId) : null; + + if (resolved != null) + { + final long resolvedId = resolved.id; + final KafkaSaslConfig sasl = binding.sasl(); + + newStream = new KafkaOffsetFetchStream( + application, + originId, + routedId, + initialId, + affinity, + resolvedId, + groupId, + topics, + sasl)::onApplication; + } + + return newStream; + } + + private MessageConsumer newStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + final MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long 
traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer payload, + int offset, + int length, + Consumer extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload, offset, length) + .extension(extension) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doDataNull( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long 
originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doWindow( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + { + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Flyweight extension) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + @FunctionalInterface + private interface KafkaOffsetFetchClientDecoder + { + int decode( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, 
+ long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int progress, + int limit); + } + + private int decodeOffsetFetchResponse( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final ResponseHeaderFW responseHeader = responseHeaderRO.tryWrap(buffer, progress, limit); + if (responseHeader == null) + { + break decode; + } + + progress = responseHeader.limit(); + + client.decodeableResponseBytes = responseHeader.length(); + + final OffsetFetchResponseFW offsetFetchResponse = offsetFetchResponseRO.tryWrap(buffer, progress, limit); + if (offsetFetchResponse == null) + { + break decode; + } + + progress = offsetFetchResponse.limit(); + + client.decodeableResponseBytes -= offsetFetchResponse.sizeof(); + assert client.decodeableResponseBytes >= 0; + + client.decodeableTopics = offsetFetchResponse.topicCount(); + client.decoder = decodeOffsetFetchTopics; + } + + return progress; + } + + private int decodeOffsetFetchTopics( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + if (client.decodeableTopics == 0) + { + assert client.decodeableResponseBytes == 0; + + client.decoder = decodeOffsetFetchResponse; + } + else + { + client.decoder = decodeOffsetFetchTopic; + } + + return progress; + } + + private int decodeOffsetFetchTopic( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final OffsetFetchTopicResponseFW topicOffsetFetch = 
offsetFetchTopicResponseRO.tryWrap(buffer, progress, limit); + if (topicOffsetFetch == null) + { + break decode; + } + + final String topic = topicOffsetFetch.name().asString(); + + client.onDecodeTopic(traceId, authorization, topic); + + progress = topicOffsetFetch.limit(); + + client.decodeableResponseBytes -= topicOffsetFetch.sizeof(); + assert client.decodeableResponseBytes >= 0; + + client.decodeablePartitions = topicOffsetFetch.partitionCount(); + client.decoder = decodeOffsetFetchPartitions; + } + + return progress; + } + + private int decodeOffsetFetchPartitions( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + if (client.decodeablePartitions == 0) + { + client.decodeableTopics--; + assert client.decodeableTopics >= 0; + + client.decoder = decodeOffsetFetchTopics; + client.onDecodeOffsetFetchResponse(traceId); + } + else + { + client.decoder = decodeOffsetFetchPartition; + } + + return progress; + } + + private int decodeOffsetFetchPartition( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final OffsetFetchPartitionFW partition = offsetFetchPartitionRO.tryWrap(buffer, progress, limit); + if (partition == null) + { + break decode; + } + + final int partitionError = partition.errorCode(); + final int partitionId = partition.partitionIndex(); + final long offsetCommitted = partition.committedOffset(); + + client.onDecodePartition(traceId, partitionId, offsetCommitted, partitionError); + + progress = partition.limit(); + + client.decodeableResponseBytes -= partition.sizeof(); + assert client.decodeableResponseBytes >= 0; + + client.decodeablePartitions--; + assert client.decodeablePartitions 
>= 0; + + client.decoder = decodeOffsetFetchPartitions; + } + + return progress; + } + + private int decodeReject( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + client.doNetworkResetIfNecessary(traceId); + client.decoder = decodeIgnoreAll; + return limit; + } + + private int decodeIgnoreAll( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + return limit; + } + + private final class KafkaOffsetFetchStream + { + private final MessageConsumer application; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final long affinity; + private final KafkaOffsetFetchClient client; + private final KafkaClientRoute clientRoute; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private long replyBudgetId; + + KafkaOffsetFetchStream( + MessageConsumer application, + long originId, + long routedId, + long initialId, + long affinity, + long resolvedId, + String groupId, + List topics, + KafkaSaslConfig sasl) + { + this.application = application; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + this.clientRoute = supplyClientRoute.apply(resolvedId); + this.client = new KafkaOffsetFetchClient(routedId, resolvedId, groupId, topics, sasl); + } + + private void onApplication( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = 
beginRO.wrap(buffer, index, index + length); + onApplicationBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onApplicationData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onApplicationEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onApplicationAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onApplicationWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onApplicationReset(reset); + break; + default: + break; + } + } + + private void onApplicationBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + + state = KafkaState.openingInitial(state); + + client.doNetworkBegin(traceId, authorization, affinity); + } + + private void onApplicationData( + DataFW data) + { + final long traceId = data.traceId(); + + client.cleanupNetwork(traceId); + } + + private void onApplicationEnd( + EndFW end) + { + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + state = KafkaState.closedInitial(state); + + client.doNetworkEnd(traceId, authorization); + } + + private void onApplicationAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedInitial(state); + + client.doNetworkAbortIfNecessary(traceId); + } + + private void onApplicationWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + 
this.replyAck = acknowledge; + this.replyMax = maximum; + this.replyPad = padding; + this.replyBudgetId = budgetId; + + assert replyAck <= replySeq; + } + + private void onApplicationReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + client.doNetworkResetIfNecessary(traceId); + } + + private boolean isApplicationReplyOpen() + { + return KafkaState.replyOpening(state); + } + + private void doApplicationBeginIfNecessary( + long traceId, + long authorization) + { + if (!KafkaState.replyOpening(state)) + { + doApplicationBegin(traceId, authorization); + } + } + + private void doApplicationBegin( + long traceId, + long authorization) + { + state = KafkaState.openingReply(state); + + doBegin(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, EMPTY_EXTENSION); + } + + private void doApplicationData( + long traceId, + long authorization, + KafkaDataExFW extension) + { + final int reserved = replyPad; + + doDataNull(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, reserved, extension); + + replySeq += reserved; + + assert replyAck <= replySeq; + } + + private void doApplicationEnd( + long traceId) + { + state = KafkaState.closedReply(state); + //client.stream = nullIfClosed(state, client.stream); + doEnd(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, client.authorization, EMPTY_EXTENSION); + } + + private void doApplicationAbort( + long traceId) + { + state = KafkaState.closedReply(state); + //client.stream = nullIfClosed(state, client.stream); + doAbort(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, client.authorization, EMPTY_EXTENSION); + } + + private void doApplicationWindow( + long traceId, + long budgetId, + int minInitialNoAck, + int minInitialPad, + int minInitialMax) + { + final long newInitialAck = 
Math.max(initialSeq - minInitialNoAck, initialAck); + + if (newInitialAck > initialAck || minInitialMax > initialMax || !KafkaState.initialOpened(state)) + { + initialAck = newInitialAck; + assert initialAck <= initialSeq; + + initialMax = minInitialMax; + + state = KafkaState.openedInitial(state); + + doWindow(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, client.authorization, budgetId, minInitialPad); + } + } + + private void doApplicationReset( + long traceId, + Flyweight extension) + { + state = KafkaState.closedInitial(state); + //client.stream = nullIfClosed(state, client.stream); + + doReset(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, client.authorization, extension); + } + + private void doApplicationAbortIfNecessary( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doApplicationAbort(traceId); + } + } + + private void doApplicationResetIfNecessary( + long traceId, + Flyweight extension) + { + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + { + doApplicationReset(traceId, extension); + } + } + + private void cleanupApplication( + long traceId, + int error) + { + final KafkaResetExFW kafkaResetEx = kafkaResetExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .error(error) + .build(); + + cleanupApplication(traceId, kafkaResetEx); + } + + private void cleanupApplication( + long traceId, + Flyweight extension) + { + doApplicationResetIfNecessary(traceId, extension); + doApplicationAbortIfNecessary(traceId); + } + + private final class KafkaOffsetFetchClient extends KafkaSaslClient + { + private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest; + private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest; + private final LongLongConsumer encodeOffsetFetchRequest = this::doEncodeOffsetFetchRequest; 
+ + private final String groupId; + private final List topics; + private final Int2ObjectHashMap topicPartitions; + private String newTopic; + + private MessageConsumer network; + private int state; + private int decodeableResponseBytes; + private int decodeableTopics; + private int decodeablePartitions; + private long authorization; + + private long initialSeq; + private long initialAck; + private int initialMax; + private int initialPad; + private long initialBudgetId; + + private long replySeq; + private long replyAck; + private int replyMax; + + private int encodeSlot = NO_SLOT; + private int encodeSlotOffset; + private long encodeSlotTraceId; + + private int decodeSlot = NO_SLOT; + private int decodeSlotOffset; + private int decodeSlotReserved; + + private int nextResponseId; + + private KafkaOffsetFetchClientDecoder decoder; + private LongLongConsumer encoder; + + KafkaOffsetFetchClient( + long originId, + long routedId, + String groupId, + List topics, + KafkaSaslConfig sasl) + { + super(sasl, originId, routedId); + this.groupId = requireNonNull(groupId); + this.topics = topics; + this.topicPartitions = new Int2ObjectHashMap<>(); + + this.encoder = sasl != null ? 
encodeSaslHandshakeRequest : encodeOffsetFetchRequest; + this.decoder = decodeReject; + } + + private void onNetwork( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onNetworkBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onNetworkData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onNetworkEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onNetworkAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onNetworkReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onNetworkWindow(window); + break; + case SignalFW.TYPE_ID: + final SignalFW signal = signalRO.wrap(buffer, index, index + length); + onNetworkSignal(signal); + break; + default: + break; + } + } + + private void onNetworkBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + authorization = begin.authorization(); + state = KafkaState.openingReply(state); + + doNetworkWindow(traceId, 0L, 0, 0, decodePool.slotCapacity()); + } + + private void onNetworkData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long budgetId = data.budgetId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + data.reserved(); + authorization = data.authorization(); + + assert replyAck <= replySeq; + + if (replySeq > replyAck + replyMax) + { + cleanupNetwork(traceId); + } + else + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = decodePool.acquire(initialId); + } + + if (decodeSlot == NO_SLOT) + { + 
cleanupNetwork(traceId); + } + else + { + final OctetsFW payload = data.payload(); + int reserved = data.reserved(); + int offset = payload.offset(); + int limit = payload.limit(); + + final MutableDirectBuffer buffer = decodePool.buffer(decodeSlot); + buffer.putBytes(decodeSlotOffset, payload.buffer(), offset, limit - offset); + decodeSlotOffset += limit - offset; + decodeSlotReserved += reserved; + + offset = 0; + limit = decodeSlotOffset; + reserved = decodeSlotReserved; + + decodeNetwork(traceId, authorization, budgetId, reserved, buffer, offset, limit); + } + } + } + + private void onNetworkEnd( + EndFW end) + { + final long traceId = end.traceId(); + + state = KafkaState.closedReply(state); + + cleanupDecodeSlotIfNecessary(); + + if (!isApplicationReplyOpen()) + { + cleanupNetwork(traceId); + } + else if (decodeSlot == NO_SLOT) + { + doApplicationEnd(traceId); + } + } + + private void onNetworkAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedReply(state); + + cleanupNetwork(traceId); + } + + private void onNetworkReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + cleanupNetwork(traceId); + } + + private void onNetworkWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= initialSeq; + assert acknowledge >= initialAck; + assert maximum + acknowledge >= initialMax + initialAck; + + this.initialAck = acknowledge; + this.initialMax = maximum; + this.initialPad = padding; + this.initialBudgetId = budgetId; + + assert initialAck <= initialSeq; + + this.authorization = window.authorization(); + + state = KafkaState.openedInitial(state); + + if (encodeSlot != NO_SLOT) + { 
+ final MutableDirectBuffer buffer = encodePool.buffer(encodeSlot); + final int limit = encodeSlotOffset; + + encodeNetwork(encodeSlotTraceId, authorization, budgetId, buffer, 0, limit); + } + + doEncodeRequestIfNecessary(traceId, budgetId); + } + + private void onNetworkSignal( + SignalFW signal) + { + final long traceId = signal.traceId(); + final int signalId = signal.signalId(); + + if (signalId == SIGNAL_NEXT_REQUEST) + { + doEncodeRequestIfNecessary(traceId, initialBudgetId); + } + } + + private void doNetworkBegin( + long traceId, + long authorization, + long affinity) + { + state = KafkaState.openingInitial(state); + + network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, EMPTY_EXTENSION); + } + + @Override + protected void doNetworkData( + long traceId, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + if (encodeSlot != NO_SLOT) + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(encodeSlotOffset, buffer, offset, limit - offset); + encodeSlotOffset += limit - offset; + encodeSlotTraceId = traceId; + + buffer = encodeBuffer; + offset = 0; + limit = encodeSlotOffset; + } + + encodeNetwork(traceId, authorization, budgetId, buffer, offset, limit); + } + + private void doNetworkEnd( + long traceId, + long authorization) + { + state = KafkaState.closedInitial(state); + + cleanupEncodeSlotIfNecessary(); + + doEnd(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + } + + private void doNetworkAbortIfNecessary( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doAbort(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + state = KafkaState.closedInitial(state); + } + + cleanupEncodeSlotIfNecessary(); + } + + private void doNetworkResetIfNecessary( + long 
traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_OCTETS); + state = KafkaState.closedReply(state); + } + + cleanupDecodeSlotIfNecessary(); + } + + private void doNetworkWindow( + long traceId, + long budgetId, + int minReplyNoAck, + int minReplyPad, + int minReplyMax) + { + final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck); + + if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state)) + { + replyAck = newReplyAck; + assert replyAck <= replySeq; + + replyMax = minReplyMax; + + state = KafkaState.openedReply(state); + + doWindow(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, minReplyPad); + } + } + + private void doEncodeRequestIfNecessary( + long traceId, + long budgetId) + { + if (nextRequestId == nextResponseId) + { + encoder.accept(traceId, budgetId); + } + } + + private void doEncodeOffsetFetchRequest( + long traceId, + long budgetId) + { + if (KafkaConfiguration.DEBUG) + { + System.out.format("[client] %s OFFSET FETCH\n", groupId); + } + + final MutableDirectBuffer encodeBuffer = writeBuffer; + final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; + final int encodeLimit = encodeBuffer.capacity(); + + int encodeProgress = encodeOffset; + + final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .length(0) + .apiKey(OFFSET_FETCH_API_KEY) + .apiVersion(OFFSET_FETCH_API_VERSION) + .correlationId(0) + .clientId((String) null) + .build(); + + encodeProgress = requestHeader.limit(); + + final OffsetFetchRequestFW offsetFetchRequest = + offsetFetchRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .groupId(groupId) + .topicCount(topics.size()) + .build(); + + encodeProgress = offsetFetchRequest.limit(); + + for (KafkaOffsetFetchTopic topic: topics) + { + final OffsetFetchTopicRequestFW 
offsetFetchTopicRequest = + offsetFetchTopicRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .topic(topic.topic) + .partitionsCount(topic.partitions.size()) + .build(); + encodeProgress = offsetFetchTopicRequest.limit(); + + for (Integer partition : topic.partitions) + { + final PartitionIndexFW partitionIndex = + partitionIndexRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .index(partition) + .build(); + encodeProgress = partitionIndex.limit(); + } + + } + + final int requestId = nextRequestId++; + final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; + + requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit()) + .length(requestSize) + .apiKey(requestHeader.apiKey()) + .apiVersion(requestHeader.apiVersion()) + .correlationId(requestId) + .clientId(requestHeader.clientId().asString()) + .build(); + + doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); + + decoder = decodeOffsetFetchResponse; + } + + private void encodeNetwork( + long traceId, + long authorization, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + final int maxLength = limit - offset; + final int initialWin = initialMax - (int)(initialSeq - initialAck); + final int length = Math.max(Math.min(initialWin - initialPad, maxLength), 0); + + if (length > 0) + { + final int reserved = length + initialPad; + + doData(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_EXTENSION); + + initialSeq += reserved; + + assert initialAck <= initialSeq; + } + + final int remaining = maxLength - length; + if (remaining > 0) + { + if (encodeSlot == NO_SLOT) + { + encodeSlot = encodePool.acquire(initialId); + } + + if (encodeSlot == NO_SLOT) + { + cleanupNetwork(traceId); + } + else + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(0, buffer, 
offset + length, remaining); + encodeSlotOffset = remaining; + } + } + else + { + cleanupEncodeSlotIfNecessary(); + } + } + + private void decodeNetwork( + long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int limit) + { + KafkaOffsetFetchClientDecoder previous = null; + int progress = offset; + while (progress <= limit && previous != decoder) + { + previous = decoder; + progress = decoder.decode(this, traceId, authorization, budgetId, reserved, buffer, offset, progress, limit); + } + + if (progress < limit) + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = decodePool.acquire(initialId); + } + + if (decodeSlot == NO_SLOT) + { + cleanupNetwork(traceId); + } + else + { + final MutableDirectBuffer decodeBuffer = decodePool.buffer(decodeSlot); + decodeBuffer.putBytes(0, buffer, progress, limit - progress); + decodeSlotOffset = limit - progress; + decodeSlotReserved = (limit - progress) * reserved / (limit - offset); + } + + doNetworkWindow(traceId, budgetId, decodeSlotOffset, 0, replyMax); + } + else + { + cleanupDecodeSlotIfNecessary(); + + if (KafkaState.replyClosing(state)) + { + doApplicationEnd(traceId); + } + else if (reserved > 0) + { + doNetworkWindow(traceId, budgetId, 0, 0, replyMax); + } + } + } + + @Override + protected void doDecodeSaslHandshakeResponse( + long traceId) + { + decoder = decodeSaslHandshakeResponse; + } + + @Override + protected void doDecodeSaslHandshake( + long traceId) + { + decoder = decodeSaslHandshake; + } + + @Override + protected void doDecodeSaslHandshakeMechanisms( + long traceId) + { + decoder = decodeSaslHandshakeMechanisms; + } + + @Override + protected void doDecodeSaslHandshakeMechansim( + long traceId) + { + decoder = decodeSaslHandshakeMechanism; + } + + @Override + protected void doDecodeSaslAuthenticateResponse( + long traceId) + { + decoder = decodeSaslAuthenticateResponse; + } + + @Override + protected void doDecodeSaslAuthenticate( + long traceId) + { + 
decoder = decodeSaslAuthenticate; + } + + @Override + protected void onDecodeSaslHandshakeResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + client.encoder = client.encodeSaslAuthenticateRequest; + client.decoder = decodeSaslAuthenticateResponse; + break; + default: + cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + @Override + protected void onDecodeSaslAuthenticateResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + client.encoder = client.encodeOffsetFetchRequest; + client.decoder = decodeOffsetFetchResponse; + break; + default: + cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + @Override + protected void onDecodeSaslResponse( + long traceId) + { + nextResponseId++; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onDecodeOffsetFetchResponse( + long traceId) + { + doApplicationBeginIfNecessary(traceId, authorization); + doApplicationWindow(traceId, 0L, 0, 0, 0); + + final KafkaDataExFW kafkaDataEx = kafkaDataExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .offsetFetch(m -> + m.topic(t -> + t.topic(newTopic) + .offsets(o -> topicPartitions.forEach((k, v) -> + o.item(to -> to + .partitionId(k) + .partitionOffset(v) + ))))) + .build(); + + doApplicationData(traceId, authorization, kafkaDataEx); + + nextResponseId++; + } + + public void onDecodeTopic( + long traceId, + long authorization, + String topic) + { + newTopic = topic; + } + + public void onDecodePartition( + long traceId, + int partitionId, + long offsetCommitted, + int partitionError) + { + if (partitionError == ERROR_NONE) + { + topicPartitions.put(partitionId, (Long) offsetCommitted); + } + } + + private void cleanupNetwork( + long traceId) + { + doNetworkResetIfNecessary(traceId); + 
doNetworkAbortIfNecessary(traceId); + + cleanupApplication(traceId, EMPTY_OCTETS); + } + + private void cleanupDecodeSlotIfNecessary() + { + if (decodeSlot != NO_SLOT) + { + decodePool.release(decodeSlot); + decodeSlot = NO_SLOT; + decodeSlotOffset = 0; + decodeSlotReserved = 0; + } + } + + private void cleanupEncodeSlotIfNecessary() + { + if (encodeSlot != NO_SLOT) + { + encodePool.release(encodeSlot); + encodeSlot = NO_SLOT; + encodeSlotOffset = 0; + encodeSlotTraceId = 0; + } + } + } + } + + +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java index 702792edc7..c9383a6581 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java @@ -34,8 +34,10 @@ import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; import org.agrona.collections.Int2IntHashMap; +import org.agrona.collections.Int2ObjectHashMap; import org.agrona.collections.Long2LongHashMap; import org.agrona.collections.MutableInteger; +import org.agrona.collections.MutableReference; import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; @@ -71,6 +73,8 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.FlushFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerAssignmentFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerDataExFW; import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDescribeDataExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFetchDataExFW; @@ -81,6 +85,7 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaMergedFlushExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaMetaDataExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaTopicPartitionFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; import io.aklivity.zilla.runtime.engine.EngineContext; @@ -142,6 +147,7 @@ public final class KafkaMergedFactory implements BindingHandler private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder(); + private final KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder(); private final MutableInteger partitionCount = new MutableInteger(); private final MutableInteger initialNoAckRW = new MutableInteger(); @@ -958,7 +964,8 @@ private void doReset( long acknowledge, int maximum, long traceId, - long authorization) + long authorization, + Consumer extension) { final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) .originId(originId) @@ -969,6 +976,7 @@ private void doReset( .maximum(maximum) .traceId(traceId) .authorization(authorization) + .extension(extension) .build(); sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); @@ -1001,6 +1009,8 @@ private final class KafkaMergedStream private final KafkaUnmergedMetaStream metaStream; private final List 
fetchStreams; private final List produceStreams; + private final Int2ObjectHashMap consumers; + private final Int2IntHashMap leadersByAssignedId; private final Int2IntHashMap leadersByPartitionId; private final Long2LongHashMap latestOffsetByPartitionId; private final Long2LongHashMap stableOffsetByPartitionId; @@ -1035,7 +1045,11 @@ private final class KafkaMergedStream private int fetchStreamIndex; private long mergedReplyBudgetId = NO_CREDITOR_INDEX; + private KafkaUnmergedConsumerStream consumerStream; private KafkaUnmergedProduceStream producer; + private String groupId; + private String consumerId; + private int timeout; KafkaMergedStream( MessageConsumer sender, @@ -1067,7 +1081,9 @@ private final class KafkaMergedStream this.metaStream = new KafkaUnmergedMetaStream(this); this.fetchStreams = new ArrayList<>(); this.produceStreams = new ArrayList<>(); + this.consumers = new Int2ObjectHashMap<>(); this.leadersByPartitionId = new Int2IntHashMap(-1); + this.leadersByAssignedId = new Int2IntHashMap(-1); this.latestOffsetByPartitionId = new Long2LongHashMap(-3); this.stableOffsetByPartitionId = new Long2LongHashMap(-3); this.nextOffsetsById = initialOffsetsById; @@ -1148,6 +1164,14 @@ private void onMergedInitialBegin( this.maximumOffset = asMaximumOffset(mergedBeginEx.partitions()); this.filters = asMergedFilters(filters); this.evaluation = mergedBeginEx.evaluation(); + this.groupId = mergedBeginEx.groupId().asString(); + this.consumerId = mergedBeginEx.consumerId().asString(); + this.timeout = mergedBeginEx.timeout(); + + if (groupId != null && !groupId.isEmpty()) + { + this.consumerStream = new KafkaUnmergedConsumerStream(this); + } describeStream.doDescribeInitialBegin(traceId); } @@ -1179,6 +1203,7 @@ private void onMergedInitialData( final int flags = data.flags(); final OctetsFW payload = data.payload(); final OctetsFW extension = data.extension(); + MutableReference consumerId = new MutableReference<>(); if (producer == null) { @@ -1198,17 +1223,31 @@ 
private void onMergedInitialData( final int nextPartitionId = partitionId == DYNAMIC_PARTITION ? nextPartitionData(hashKey, key) : partitionId; final KafkaUnmergedProduceStream newProducer = findProducePartitionLeader(nextPartitionId); - assert newProducer != null; // TODO this.producer = newProducer; - } - assert producer != null; + if (this.producer == null) + { + consumerId.set(consumers.get(nextPartitionId)); + } + } - producer.doProduceInitialData(traceId, reserved, flags, budgetId, payload, extension); + if (this.producer != null) + { + producer.doProduceInitialData(traceId, reserved, flags, budgetId, payload, extension); - if ((flags & FLAGS_FIN) != FLAGS_NONE) + if ((flags & FLAGS_FIN) != FLAGS_NONE) + { + this.producer = null; + } + } + else { - this.producer = null; + doMergedInitialReset(traceId, ex -> ex.set((b, o, l) -> kafkaResetExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .error(0) + .consumerId(consumerId.get()) + .build() + .sizeof())); } } } @@ -1224,7 +1263,7 @@ private int nextPartitionData( KafkaKeyFW hashKey, KafkaKeyFW key) { - final int partitionCount = leadersByPartitionId.size(); + final int partitionCount = leadersByAssignedId.size(); final int keyHash = hashKey.length() != -1 ? defaultKeyHash(hashKey) : key.length() != -1 ? defaultKeyHash(key) : nextNullKeyHashData++; @@ -1236,7 +1275,7 @@ private int nextPartitionData( private int nextPartitionFlush( KafkaKeyFW key) { - final int partitionCount = leadersByPartitionId.size(); + final int partitionCount = leadersByAssignedId.size(); final int keyHash = key.length() != -1 ? defaultKeyHash(key) : nextNullKeyHashFlush++; final int partitionId = partitionCount > 0 ? 
(0x7fff_ffff & keyHash) % partitionCount : 0; @@ -1295,18 +1334,18 @@ private void onMergedInitialFlush( assert kafkaFlushEx != null; assert kafkaFlushEx.kind() == KafkaFlushExFW.KIND_MERGED; final KafkaMergedFlushExFW kafkaMergedFlushEx = kafkaFlushEx.merged(); - final KafkaCapabilities newCapabilities = kafkaMergedFlushEx.capabilities().get(); - final Array32FW filters = kafkaMergedFlushEx.filters(); + final KafkaCapabilities newCapabilities = kafkaMergedFlushEx.fetch().capabilities().get(); + final Array32FW filters = kafkaMergedFlushEx.fetch().filters(); final List newFilters = asMergedFilters(filters); if (capabilities != newCapabilities) { - this.maximumOffset = asMaximumOffset(kafkaMergedFlushEx.progress()); + this.maximumOffset = asMaximumOffset(kafkaMergedFlushEx.fetch().progress()); if (hasFetchCapability(newCapabilities) && !hasFetchCapability(capabilities)) { final Long2LongHashMap initialOffsetsById = new Long2LongHashMap(-3L); - kafkaMergedFlushEx.progress().forEach(p -> + kafkaMergedFlushEx.fetch().progress().forEach(p -> { final long partitionId = p.partitionId(); if (partitionId >= 0L) @@ -1330,8 +1369,8 @@ private void onMergedInitialFlush( { if (hasProduceCapability(capabilities)) { - final KafkaOffsetFW partition = kafkaMergedFlushEx.partition(); - final KafkaKeyFW key = kafkaMergedFlushEx.key(); + final KafkaOffsetFW partition = kafkaMergedFlushEx.fetch().partition(); + final KafkaKeyFW key = kafkaMergedFlushEx.fetch().key(); if (partition != null) { final int partitionId = partition.partitionId(); @@ -1347,7 +1386,7 @@ private void onMergedInitialFlush( if (hasFetchCapability(capabilities) && !newFilters.equals(this.filters)) { this.filters = newFilters; - final int partitionCount = leadersByPartitionId.size(); + final int partitionCount = leadersByAssignedId.size(); for (int partitionId = 0; partitionId < partitionCount; partitionId++) { doFetchInitialFlush(traceId, partitionId); @@ -1624,13 +1663,14 @@ private void doMergedInitialWindow( } 
private void doMergedInitialReset( - long traceId) + long traceId, + Consumer extension) { assert !KafkaState.initialClosed(state); state = KafkaState.closedInitial(state); doReset(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization); + traceId, authorization, extension); } private void doMergedReplyEndIfNecessary( @@ -1675,12 +1715,12 @@ private void doMergedReplyFlush( { final KafkaFlushExFW kafkaFlushExFW = kafkaFlushExRW.wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) - .merged(f -> f - .progress(ps -> nextOffsetsById.longForEach((p, o) -> - ps.item(i -> i.partitionId((int) p) - .partitionOffset(o) - .stableOffset(initialStableOffsetsById.get(p)) - .latestOffset(initialLatestOffsetsById.get(p)))))) + .merged(ff -> ff + .fetch(f -> f.progress(ps -> nextOffsetsById.longForEach((p, o) -> + ps.item(i -> i.partitionId((int) p) + .partitionOffset(o) + .stableOffset(initialStableOffsetsById.get(p)) + .latestOffset(initialLatestOffsetsById.get(p))))))) .build(); doFlush(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, @@ -1729,7 +1769,7 @@ private void doMergedInitialResetIfNecessary( cleanupBudgetCreditorIfNecessary(); if (fetchStreams.isEmpty()) { - doMergedInitialReset(traceId); + doMergedInitialReset(traceId, EMPTY_EXTENSION); } } } @@ -1765,7 +1805,44 @@ private void onTopicMetaDataChanged( partitions.forEach(p -> leadersByPartitionId.put(p.partitionId(), p.leaderId())); partitionCount.value = 0; partitions.forEach(partition -> partitionCount.value++); - assert leadersByPartitionId.size() == partitionCount.value; + + if (this.consumerStream != null) + { + this.consumerStream.doConsumerInitialBeginIfNecessary(traceId); + } + else + { + leadersByAssignedId.clear(); + partitions.forEach(p -> leadersByAssignedId.put(p.partitionId(), p.leaderId())); + assert leadersByAssignedId.size() == partitionCount.value; + + doFetchPartitionsIfNecessary(traceId); + 
doProducePartitionsIfNecessary(traceId); + } + } + + private void onTopicConsumerDataChanged( + long traceId, + Array32FW partitions, + Array32FW newAssignments) + { + leadersByAssignedId.clear(); + partitions.forEach(p -> + { + int partitionId = p.partitionId(); + int leaderId = leadersByPartitionId.get(partitionId); + leadersByAssignedId.put(partitionId, leaderId); + }); + + consumers.clear(); + newAssignments.forEach(a -> + { + a.partitions().forEach(p -> + { + final String consumerId = a.consumerId().asString(); + consumers.put(p.partitionId(), consumerId); + }); + }); doFetchPartitionsIfNecessary(traceId); doProducePartitionsIfNecessary(traceId); @@ -1776,19 +1853,14 @@ private void doFetchPartitionsIfNecessary( { if (hasFetchCapability(capabilities)) { - final int partitionCount = leadersByPartitionId.size(); - for (int partitionId = 0; partitionId < partitionCount; partitionId++) - { - doFetchPartitionIfNecessary(traceId, partitionId); - } - assert fetchStreams.size() >= leadersByPartitionId.size(); + leadersByAssignedId.forEach((k, v) -> doFetchPartitionIfNecessary(traceId, k)); + assert fetchStreams.size() >= leadersByAssignedId.size(); - int offsetCount = nextOffsetsById.size(); - for (int partitionId = partitionCount; partitionId < offsetCount; partitionId++) - { - nextOffsetsById.remove(partitionId); - } - assert nextOffsetsById.size() <= leadersByPartitionId.size(); + nextOffsetsById.entrySet() + .removeIf( + entry -> !leadersByAssignedId.containsKey(entry.getKey().intValue())); + + assert nextOffsetsById.size() <= leadersByAssignedId.size(); } } @@ -1797,12 +1869,8 @@ private void doProducePartitionsIfNecessary( { if (hasProduceCapability(capabilities)) { - final int partitionCount = leadersByPartitionId.size(); - for (int partitionId = 0; partitionId < partitionCount; partitionId++) - { - doProducePartitionIfNecessary(traceId, partitionId); - } - assert produceStreams.size() >= leadersByPartitionId.size(); + leadersByAssignedId.forEach((k, v) -> 
doProducePartitionIfNecessary(traceId, k)); + assert produceStreams.size() >= leadersByAssignedId.size(); } } @@ -1810,7 +1878,7 @@ private void doFetchPartitionIfNecessary( long traceId, int partitionId) { - final int leaderId = leadersByPartitionId.get(partitionId); + final int leaderId = leadersByAssignedId.get(partitionId); final long partitionOffset = nextFetchPartitionOffset(partitionId); KafkaUnmergedFetchStream leader = findFetchPartitionLeader(partitionId); @@ -1941,7 +2009,7 @@ private void doProducePartitionIfNecessary( long traceId, int partitionId) { - final int leaderId = leadersByPartitionId.get(partitionId); + final int leaderId = leadersByAssignedId.get(partitionId); KafkaUnmergedProduceStream leader = findProducePartitionLeader(partitionId); @@ -1967,7 +2035,7 @@ private void onProducePartitionLeaderReady( long traceId, long partitionId) { - if (produceStreams.size() == leadersByPartitionId.size()) + if (produceStreams.size() == leadersByAssignedId.size()) { if (!KafkaState.initialOpened(state)) { @@ -1993,7 +2061,7 @@ private void onProducePartitionLeaderError( final KafkaUnmergedProduceStream leader = findProducePartitionLeader(partitionId); assert leader != null; - if (leadersByPartitionId.containsKey(partitionId)) + if (leadersByAssignedId.containsKey(partitionId)) { leader.doProduceInitialBegin(traceId); } @@ -2279,7 +2347,7 @@ private void doDescribeReplyReset( state = KafkaState.closedReply(state); doReset(receiver, merged.routedId, merged.resolvedId, replyId, replySeq, replyAck, replyMax, - traceId, merged.authorization); + traceId, merged.authorization, EMPTY_EXTENSION); } } @@ -2535,7 +2603,271 @@ private void doMetaReplyReset( state = KafkaState.closedReply(state); doReset(receiver, merged.routedId, merged.resolvedId, replyId, replySeq, replyAck, replyMax, - traceId, merged.authorization); + traceId, merged.authorization, EMPTY_EXTENSION); + } + } + + private final class KafkaUnmergedConsumerStream + { + private final KafkaMergedStream 
merged; + + private long initialId; + private long replyId; + private MessageConsumer receiver = NO_RECEIVER; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + + private KafkaUnmergedConsumerStream( + KafkaMergedStream merged) + { + this.merged = merged; + } + + private void doConsumerInitialBeginIfNecessary( + long traceId) + { + if (!KafkaState.initialOpening(state)) + { + doConsumerInitialBegin(traceId); + } + } + + private void doConsumerInitialBegin( + long traceId) + { + assert state == 0; + + state = KafkaState.openingInitial(state); + + this.initialId = supplyInitialId.applyAsLong(merged.resolvedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.receiver = newStream(this::onConsumerReply, + merged.routedId, merged.resolvedId, initialId, initialSeq, initialAck, initialMax, + traceId, merged.authorization, 0L, + ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .consumer(c -> c + .groupId(merged.groupId) + .consumerId(merged.consumerId) + .timeout(merged.timeout) + .topic(merged.topic) + .partitionIds(p -> merged.leadersByPartitionId.forEach((k, v) -> + p.item(tp -> tp.partitionId(k)))) + ) + .build() + .sizeof())); + } + + private void doConsumerInitialEndIfNecessary( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doConsumerInitialEnd(traceId); + } + } + + private void doConsumerInitialEnd( + long traceId) + { + state = KafkaState.closedInitial(state); + + doEnd(receiver, merged.routedId, merged.resolvedId, initialId, initialSeq, initialAck, initialMax, + traceId, merged.authorization, EMPTY_EXTENSION); + } + + private void doConsumerInitialAbortIfNecessary( + long traceId) + { + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + { + doConsumerInitialAbort(traceId); + } + } + + private void doConsumerInitialAbort( + long traceId) + { + 
state = KafkaState.closedInitial(state); + + doAbort(receiver, merged.routedId, merged.resolvedId, initialId, initialSeq, initialAck, initialMax, + traceId, merged.authorization, EMPTY_EXTENSION); + } + + private void onConsumerReply( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onConsumerReplyBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onConsumerReplyData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onConsumerReplyEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onConsumerReplyAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onConsumerInitialReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onConsumerInitialWindow(window); + break; + default: + break; + } + } + + private void onConsumerReplyBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + state = KafkaState.openingReply(state); + + doConsumerReplyWindow(traceId, 0, 8192); + } + + private void onConsumerReplyData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final int reserved = data.reserved(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + + if (replySeq > replyAck + replyMax) + { + merged.doMergedCleanup(traceId); + } + else + { + final KafkaDataExFW kafkaDataEx = extension.get(kafkaDataExRO::wrap); + final KafkaConsumerDataExFW kafkaConsumerDataEx = kafkaDataEx.consumer(); + final 
Array32FW partitions = kafkaConsumerDataEx.partitions(); + final Array32FW assignments = kafkaConsumerDataEx.assignments(); + merged.onTopicConsumerDataChanged(traceId, partitions, assignments); + + doConsumerReplyWindow(traceId, 0, replyMax); + } + } + + private void onConsumerReplyEnd( + EndFW end) + { + final long traceId = end.traceId(); + + state = KafkaState.closedReply(state); + + merged.doMergedReplyBeginIfNecessary(traceId); + merged.doMergedReplyEndIfNecessary(traceId); + + doConsumerInitialEndIfNecessary(traceId); + } + + private void onConsumerReplyAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedReply(state); + + merged.doMergedReplyAbortIfNecessary(traceId); + + doConsumerInitialAbortIfNecessary(traceId); + } + + private void onConsumerInitialReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + merged.doMergedInitialResetIfNecessary(traceId); + + doConsumerReplyResetIfNecessary(traceId); + } + + private void onConsumerInitialWindow( + WindowFW window) + { + if (!KafkaState.initialOpened(state)) + { + final long traceId = window.traceId(); + + state = KafkaState.openedInitial(state); + + merged.doMergedInitialWindow(traceId, 0L); + } + } + + private void doConsumerReplyWindow( + long traceId, + int minReplyNoAck, + int minReplyMax) + { + final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck); + + if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state)) + { + replyAck = newReplyAck; + assert replyAck <= replySeq; + + replyMax = minReplyMax; + + state = KafkaState.openedReply(state); + + doWindow(receiver, merged.routedId, merged.resolvedId, replyId, replySeq, replyAck, replyMax, + traceId, merged.authorization, 0L, merged.replyPad, DEFAULT_MINIMUM); + } + } + + private void doConsumerReplyResetIfNecessary( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + 
doConsumerReplyReset(traceId); + } + } + + private void doConsumerReplyReset( + long traceId) + { + state = KafkaState.closedReply(state); + + doReset(receiver, merged.routedId, merged.resolvedId, replyId, replySeq, replyAck, replyMax, + traceId, merged.authorization, EMPTY_EXTENSION); } } @@ -2925,7 +3257,7 @@ private void doFetchReplyReset( state = KafkaState.closedReply(state); doReset(receiver, merged.routedId, merged.resolvedId, replyId, replySeq, replyAck, replyMax, - traceId, merged.authorization); + traceId, merged.authorization, EMPTY_EXTENSION); } private void setFetchFilter( @@ -3093,8 +3425,8 @@ private void doProduceInitialFlush( .typeId(kafkaTypeId) .produce(c -> { - c.partition(kafkaMergedFlushEx.partition()); - c.key(kafkaMergedFlushEx.key()); + c.partition(kafkaMergedFlushEx.fetch().partition()); + c.key(kafkaMergedFlushEx.fetch().key()); }) .build(); @@ -3334,7 +3666,7 @@ private void doProduceReplyReset( state = KafkaState.closedReply(state); doReset(receiver, merged.routedId, merged.resolvedId, replyId, replySeq, replyAck, replyMax, - traceId, merged.authorization); + traceId, merged.authorization, EMPTY_EXTENSION); } } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaOffsetFetchTopic.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaOffsetFetchTopic.java new file mode 100644 index 0000000000..f0fbc5f4ec --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaOffsetFetchTopic.java @@ -0,0 +1,32 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import java.util.List; + +public final class KafkaOffsetFetchTopic +{ + final String topic; + List partitions; + + KafkaOffsetFetchTopic( + String topic, + List partitions) + { + this.topic = topic; + this.partitions = partitions; + } +} diff --git a/runtime/binding-kafka/src/main/zilla/protocol.idl b/runtime/binding-kafka/src/main/zilla/protocol.idl index c789a6b03a..41461651fc 100644 --- a/runtime/binding-kafka/src/main/zilla/protocol.idl +++ b/runtime/binding-kafka/src/main/zilla/protocol.idl @@ -416,15 +416,8 @@ scope protocol struct Assignment { string16 memberId; - uint32 length; - octets[length] value; - } - - struct TopicPartition - { - int32 version; - string16 topic; - int32 partitionCount; + int32 length; + octets[length] value = null; } struct Partition @@ -477,22 +470,35 @@ scope protocol string16 groupInstanceId = null; } - struct TopicPartition + struct OffsetFetchRequest { - int32 partitionId; + string16 groupId; + int32 topicCount; } - struct ConsumerAssignment + struct OffsetFetchTopicRequest { - string16 consumerId; - TopicPartition[] partitions; + string16 topic; + int32 partitionsCount; } - struct MemberAssignment + struct OffsetFetchResponse { - string16 topic; - TopicPartition[] partitions; - octets userdata; + int32 topicCount; + } + + struct OffsetFetchTopicResponse + { + string16 name; + int32 partitionCount; + } + + struct OffsetFetchPartition + { + int32 partitionIndex; + int64 committedOffset; + string16 metadata = null; + int16 errorCode; } } diff 
--git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java new file mode 100644 index 0000000000..9740c276bd --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java @@ -0,0 +1,64 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.ScriptProperty; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class CacheConsumerIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/consumer"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(15, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("app1") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + @Test + @Configuration("cache.when.topic.yaml") + @Specification({ + "${app}/partition.assignment/client", + "${app}/partition.assignment/server" + }) + @ScriptProperty("serverAddress \"zilla://streams/app1\"") + public void shouldAssignPartition() throws Exception + { + k3po.finish(); + } + +} diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java index 49985db410..ded800e0db 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java +++ 
b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java @@ -585,4 +585,14 @@ public void shouldReceiveMessagesWithHeadersSkipManyFilter() throws Exception { k3po.finish(); } + + @Test + @Configuration("cache.options.merged.yaml") + @Specification({ + "${app}/merged.group.fetch.message.value/client", + "${app}/unmerged.group.fetch.message.value/server"}) + public void shouldFetchGroupMessageValue() throws Exception + { + k3po.finish(); + } } diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetFetchIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetFetchIT.java new file mode 100644 index 0000000000..12ebdf2fe7 --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetFetchIT.java @@ -0,0 +1,68 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration.KAFKA_CACHE_SERVER_BOOTSTRAP; +import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration.KAFKA_CACHE_SERVER_RECONNECT_DELAY; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_BUFFER_SLOT_CAPACITY; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.ScriptProperty; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class CacheOffsetFetchIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(10, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) + .configure(KAFKA_CACHE_SERVER_BOOTSTRAP, false) + .configure(KAFKA_CACHE_SERVER_RECONNECT_DELAY, 0) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("app1") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + @Test + @Configuration("cache.yaml") + @Specification({ + "${app}/partition.offset/client", + "${app}/partition.offset/server"}) + @ScriptProperty("serverAddress \"zilla://streams/app1\"") + public void shouldFetchPartitionOffset() 
throws Exception + { + k3po.finish(); + } +} diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientConsumerIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientConsumerIT.java new file mode 100644 index 0000000000..e7b552c39d --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientConsumerIT.java @@ -0,0 +1,64 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.ScriptProperty; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class ClientConsumerIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/application/group") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/consumer"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(15, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/partition.assignment/client", + "${net}/partition.assignment/server"}) + @ScriptProperty("serverAddress \"zilla://streams/net0\"") + public void shouldAssignGroupPartition() throws Exception + { + k3po.finish(); + } +} diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java index 937bbc57a7..f4bdf04b0e 100644 --- 
a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java @@ -65,7 +65,6 @@ public void shouldHandleClientSentWriteAbortBeforeCoordinatorResponse() throws E @Specification({ "${app}/rebalance.protocol.highlander/client", "${net}/rebalance.protocol.highlander/server"}) - public void shouldLeaveGroupOnGroupRebalanceError() throws Exception { k3po.finish(); @@ -124,7 +123,7 @@ public void shouldRejectSecondStreamOnUnknownProtocol() throws Exception @Test @Configuration("client.yaml") @Specification({ - "${app}/leader/client", + "${app}/rebalance.sync.group/client", "${net}/rebalance.sync.group/server"}) public void shouldHandleRebalanceSyncGroup() throws Exception { diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientOffsetFetchIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientOffsetFetchIT.java new file mode 100644 index 0000000000..8e410691a9 --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientOffsetFetchIT.java @@ -0,0 +1,62 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class ClientOffsetFetchIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(15, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/partition.offset/client", + "${net}/topic.offset.info/server"}) + public void shouldFetchPartitionLastCommittedOffset() throws Exception + { + k3po.finish(); + } +} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/client.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/client.rpt index e57f947461..9e463519a3 100644 --- 
a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/client.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/client.rpt @@ -134,6 +134,7 @@ read ${grpc:protobuf() read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/server.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/server.rpt index 67c95b6b5d..689d60bb74 100644 --- a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/server.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/server.rpt @@ -132,6 +132,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/client.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/client.rpt index 9b14d76b8b..70238172e0 100644 --- a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/client.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/client.rpt @@ -131,6 +131,7 @@ read zilla:data.null read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) 
.merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/server.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/server.rpt index b5fcc11706..20a82598b7 100644 --- a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/server.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/server.rpt @@ -130,6 +130,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/client.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/client.rpt index f9f4c96f5b..7924467f0b 100644 --- a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/client.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/client.rpt @@ -115,6 +115,7 @@ read ${grpc:protobuf() read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/server.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/server.rpt index 06f67ddf4c..4de87c778e 100644 --- 
a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/server.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/server.rpt @@ -115,6 +115,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/client.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/client.rpt index 3267fdb0c9..7ce3c0b948 100644 --- a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/client.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/client.rpt @@ -113,6 +113,7 @@ read zilla:data.null read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/server.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/server.rpt index 07b295000f..f847566d05 100644 --- a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/server.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/server.rpt @@ -114,6 +114,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git 
a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/client.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/client.rpt index f731d79b04..1bc6584da2 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/client.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/client.rpt @@ -51,6 +51,7 @@ read '{ "name": "widget" }' read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/server.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/server.rpt index dd908087ae..9dd9f5881c 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/server.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/server.rpt @@ -55,6 +55,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/client.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/client.rpt index b3dae39e06..82283db856 100644 --- 
a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/client.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/client.rpt @@ -50,6 +50,7 @@ read '{ "name": "widget" }' read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/server.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/server.rpt index 382be07ace..39541da663 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/server.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/server.rpt @@ -54,6 +54,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/client.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/client.rpt index a798987ec5..c8b4ac2fde 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/client.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/client.rpt @@ -70,6 +70,7 @@ read '{ "name": "gizmo" }' read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + 
.fetch() .progress(0, 2, 2, 2) .progress(1, 1, 1, 1) .build() diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/server.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/server.rpt index 2930ecee42..565fe37115 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/server.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/server.rpt @@ -76,6 +76,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .progress(1, 1, 1, 1) .build() diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/client.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/client.rpt index 520e43e31b..55da055eb3 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/client.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/client.rpt @@ -73,6 +73,7 @@ read '{ "name": "gizmo" }' read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 1, 1, 1) .progress(1, 1, 1, 1) .build() diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/server.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/server.rpt index 9a6aeebc37..34e925d118 100644 --- 
a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/server.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/server.rpt @@ -79,6 +79,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 1, 1, 1) .progress(1, 1, 1, 1) .build() diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/client.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/client.rpt index 257a5d614d..7b75e00edc 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/client.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/client.rpt @@ -71,6 +71,7 @@ read '{ "name": "gizmo" }' read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 1, 1, 1) .progress(1, 1, 1, 1) .build() diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/server.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/server.rpt index 9b30da2f32..1591d0a2da 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/server.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/server.rpt @@ -77,6 +77,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 1, 1, 1) .progress(1, 1, 1, 1) .build() diff --git 
a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java index 72eadf4add..0cab0d5536 100644 --- a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java +++ b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java @@ -59,9 +59,12 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.OctetsFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.String16FW; import io.aklivity.zilla.specs.binding.kafka.internal.types.String8FW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.rebalance.MemberAssignmentFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.rebalance.TopicAssignmentFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaApi; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaBootstrapBeginExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaConsumerAssignmentFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaConsumerBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaConsumerDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaDataExFW; @@ -72,10 +75,13 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFetchFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; -import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupFlushExFW; +import 
io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupMemberFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupMemberMetadataFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedBeginExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedConsumerFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedDataExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedFetchFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMetaBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMetaDataExFW; @@ -87,6 +93,7 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaProduceDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaProduceFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaTopicPartitionFW; public final class KafkaFunctions { @@ -276,6 +283,24 @@ public static byte[] varint( } } + @Function + public static KafkaGroupMemberMetadataBuilder memberMetadata() + { + return new KafkaGroupMemberMetadataBuilder(); + } + + @Function + public static MemberAssignmentsBuilder memberAssignment() + { + return new MemberAssignmentsBuilder(); + } + + @Function + public static TopicAssignmentsBuilder topicAssignment() + { + return new TopicAssignmentsBuilder(); + } + public abstract static class KafkaHeadersBuilder { private final KafkaHeadersFW.Builder headersRW = new KafkaHeadersFW.Builder(); @@ -555,6 +580,123 @@ private void set( } } + public static final class KafkaGroupMemberMetadataBuilder + { + private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); + + private final 
KafkaGroupMemberMetadataFW.Builder groupMemberMetadataRW = + new KafkaGroupMemberMetadataFW.Builder(); + + public KafkaGroupMemberMetadataBuilder() + { + groupMemberMetadataRW.wrap(writeBuffer, 0, writeBuffer.capacity()); + } + + public KafkaGroupMemberMetadataBuilder consumerId( + String consumerId) + { + groupMemberMetadataRW.consumerId(consumerId); + return this; + } + + public KafkaGroupMemberMetadataBuilder topic( + String topic, + int partitionId) + { + groupMemberMetadataRW.topics(t -> + t.item(tp -> tp.topic(topic) + .partitions(p -> p.item(i -> i.partitionId(partitionId))))); + return this; + } + + public byte[] build() + { + final KafkaGroupMemberMetadataFW metadata = groupMemberMetadataRW.build(); + final byte[] array = new byte[metadata.sizeof()]; + metadata.buffer().getBytes(metadata.offset(), array); + return array; + } + } + + public static final class MemberAssignmentsBuilder + { + private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); + + private final Array32FW.Builder memberAssignments = + new Array32FW.Builder(new MemberAssignmentFW.Builder(), new MemberAssignmentFW()); + + public MemberAssignmentsBuilder() + { + memberAssignments.wrap(writeBuffer, 0, writeBuffer.capacity()); + } + + public MemberAssignmentsBuilder member( + String memberId, + String topic, + int partitionId, + String consumerId, + int consumerPartitionId) + { + memberAssignments.item(ma -> + ma.memberId(memberId) + .assignments(ta -> ta.item(i -> + i.topic(topic) + .partitions(p -> p.item(tpa -> tpa.partitionId(partitionId))) + .userdata(u -> + u.item(ud -> ud + .consumerId(consumerId) + .partitions(pt -> pt.item(pi -> pi.partitionId(consumerPartitionId))))) + ))); + return this; + } + + public byte[] build() + { + Array32FW members = memberAssignments.build(); + final byte[] array = new byte[members.sizeof()]; + members.buffer().getBytes(members.offset(), array); + return array; + } + } + + public static final class TopicAssignmentsBuilder + { + 
private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); + + private final Array32FW.Builder topicAssignments = + new Array32FW.Builder(new TopicAssignmentFW.Builder(), new TopicAssignmentFW()); + + public TopicAssignmentsBuilder() + { + topicAssignments.wrap(writeBuffer, 0, writeBuffer.capacity()); + } + + public TopicAssignmentsBuilder topic( + String topic, + int partitionId, + String consumerId, + int consumerPartitionId) + { + topicAssignments.item(i -> + i.topic(topic) + .partitions(p -> p.item(tpa -> tpa.partitionId(partitionId))) + .userdata(u -> + u.item(ud -> ud + .consumerId(consumerId) + .partitions(pt -> pt.item(pi -> pi.partitionId(consumerPartitionId))))) + ); + return this; + } + + public byte[] build() + { + Array32FW topics = topicAssignments.build(); + final byte[] array = new byte[topics.sizeof()]; + topics.buffer().getBytes(topics.offset(), array); + return array; + } + } + public static final class KafkaBeginExBuilder { private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); @@ -714,6 +856,13 @@ public KafkaMergedBeginExBuilder consumerId( return this; } + public KafkaMergedBeginExBuilder timeout( + int timeout) + { + mergedBeginExRW.timeout(timeout); + return this; + } + public KafkaMergedBeginExBuilder partition( int partitionId, long offset) @@ -1048,6 +1197,13 @@ public KafkaGroupBeginExBuilder timeout( return this; } + public KafkaGroupBeginExBuilder metadata( + byte[] metadata) + { + groupBeginExRW.metadataLen(metadata.length).metadata(m -> m.set(metadata)); + return this; + } + public KafkaBeginExBuilder build() { final KafkaGroupBeginExFW groupBeginEx = groupBeginExRW.build(); @@ -1059,11 +1215,15 @@ public KafkaBeginExBuilder build() public final class KafkaConsumerBeginExBuilder { private final KafkaConsumerBeginExFW.Builder consumerBeginExRW = new KafkaConsumerBeginExFW.Builder(); + private final MutableDirectBuffer partitionBuffer = new UnsafeBuffer(new byte[1024 * 8]); + 
private final Array32FW.Builder partitionsRW = + new Array32FW.Builder<>(new KafkaTopicPartitionFW.Builder(), new KafkaTopicPartitionFW()); private KafkaConsumerBeginExBuilder() { consumerBeginExRW.wrap(writeBuffer, KafkaBeginExFW.FIELD_OFFSET_CONSUMER, writeBuffer.capacity()); + partitionsRW.wrap(partitionBuffer, 0, partitionBuffer.capacity()); } public KafkaConsumerBeginExBuilder groupId( @@ -1073,6 +1233,20 @@ public KafkaConsumerBeginExBuilder groupId( return this; } + public KafkaConsumerBeginExBuilder consumerId( + String consumerId) + { + consumerBeginExRW.consumerId(consumerId); + return this; + } + + public KafkaConsumerBeginExBuilder timeout( + int timeout) + { + consumerBeginExRW.timeout(timeout); + return this; + } + public KafkaConsumerBeginExBuilder topic( String topic) { @@ -1083,12 +1257,13 @@ public KafkaConsumerBeginExBuilder topic( public KafkaConsumerBeginExBuilder partition( int partitionId) { - consumerBeginExRW.partitionIds(p -> p.item(i -> i.partitionId(partitionId))); + partitionsRW.item(i -> i.partitionId(partitionId)); return this; } public KafkaBeginExBuilder build() { + consumerBeginExRW.partitionIds(partitionsRW.build()); final KafkaConsumerBeginExFW consumerBeginEx = consumerBeginExRW.build(); beginExRO.wrap(writeBuffer, 0, consumerBeginEx.limit()); return KafkaBeginExBuilder.this; @@ -1218,13 +1393,6 @@ public KafkaProduceDataExBuilder produce() return new KafkaProduceDataExBuilder(); } - public KafkaGroupDataExBuilder group() - { - dataExRW.kind(KafkaApi.GROUP.value()); - - return new KafkaGroupDataExBuilder(); - } - public KafkaConsumerDataExBuilder consumer() { dataExRW.kind(KafkaApi.CONSUMER.value()); @@ -1761,62 +1929,47 @@ public KafkaDataExBuilder build() } } - public final class KafkaGroupDataExBuilder + public final class KafkaConsumerDataExBuilder { - private final KafkaGroupDataExFW.Builder groupDataExRW = new KafkaGroupDataExFW.Builder(); + private final KafkaConsumerDataExFW.Builder consumerDataExRW = new 
KafkaConsumerDataExFW.Builder(); - private KafkaGroupDataExBuilder() - { - groupDataExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_GROUP, writeBuffer.capacity()); - } + private final MutableDirectBuffer partitionBuffer = new UnsafeBuffer(new byte[1024 * 8]); + private final MutableDirectBuffer assignmentBuffer = new UnsafeBuffer(new byte[1024 * 8]); + private final Array32FW.Builder partitionsRW = + new Array32FW.Builder<>(new KafkaTopicPartitionFW.Builder(), new KafkaTopicPartitionFW()); - public KafkaGroupDataExBuilder leaderId( - String leaderId) - { - groupDataExRW.leaderId(leaderId); - return this; - } + private final Array32FW.Builder assignmentsRW = + new Array32FW.Builder<>(new KafkaConsumerAssignmentFW.Builder(), new KafkaConsumerAssignmentFW()); - public KafkaGroupDataExBuilder memberId( - String memberId) + private KafkaConsumerDataExBuilder() { - groupDataExRW.memberId(memberId); - return this; + consumerDataExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_CONSUMER, writeBuffer.capacity()); + partitionsRW.wrap(partitionBuffer, 0, partitionBuffer.capacity()); + assignmentsRW.wrap(assignmentBuffer, 0, assignmentBuffer.capacity()); } - public KafkaGroupDataExBuilder members( - int members) + public KafkaConsumerDataExBuilder partition( + int partitionId) { - groupDataExRW.members(members); + partitionsRW.item(i -> i.partitionId(partitionId)); return this; } - public KafkaDataExBuilder build() - { - final KafkaGroupDataExFW groupDataEx = groupDataExRW.build(); - dataExRO.wrap(writeBuffer, 0, groupDataEx.limit()); - return KafkaDataExBuilder.this; - } - } - - public final class KafkaConsumerDataExBuilder - { - private final KafkaConsumerDataExFW.Builder consumerDataExRW = new KafkaConsumerDataExFW.Builder(); - - private KafkaConsumerDataExBuilder() - { - consumerDataExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_GROUP, writeBuffer.capacity()); - } - - public KafkaConsumerDataExBuilder partition( + public KafkaConsumerDataExBuilder assignment( + String 
consumerId, int partitionId) { - consumerDataExRW.partitions(p -> p.item(i -> i.partitionId(partitionId))); + assignmentsRW.item(i -> i + .consumerId(consumerId) + .partitions(p -> p.item(tp -> tp.partitionId(partitionId)))); + return this; } public KafkaDataExBuilder build() { + consumerDataExRW.partitions(partitionsRW.build()); + consumerDataExRW.assignments(assignmentsRW.build()); final KafkaConsumerDataExFW consumerDataEx = consumerDataExRW.build(); dataExRO.wrap(writeBuffer, 0, consumerDataEx.limit()); return KafkaDataExBuilder.this; @@ -1835,12 +1988,11 @@ private KafkaOffsetFetchDataExBuilder() public KafkaOffsetFetchDataExBuilder topic( String topic, int partitionId, - long stableOffset, - long latestOffset) + long offset) { offsetFetchDataExRW.topic(t -> t.topic(topic).offsets(o -> o.item(i -> - i.partitionId(partitionId).stableOffset(stableOffset).latestOffset(latestOffset)))); + i.partitionId(partitionId).partitionOffset(offset)))); return this; } @@ -1947,105 +2099,182 @@ public final class KafkaMergedFlushExBuilder private KafkaMergedFlushExBuilder() { - mergedFlushExRW.wrap(writeBuffer, KafkaFlushExFW.FIELD_OFFSET_FETCH, writeBuffer.capacity()); + mergedFlushExRW.wrap(writeBuffer, KafkaFlushExFW.FIELD_OFFSET_MERGED, writeBuffer.capacity()); } - public KafkaMergedFlushExBuilder progress( - int partitionId, - long offset) + public KafkaMergedFetchFlushExBuilder fetch() { - progress(partitionId, offset, DEFAULT_LATEST_OFFSET); - return this; + mergedFlushExRW.kind(KafkaApi.FETCH.value()); + + return new KafkaMergedFetchFlushExBuilder(); } - public KafkaMergedFlushExBuilder progress( - int partitionId, - long offset, - long latestOffset) + public KafkaMergedConsumerFlushExBuilder consumer() { - mergedFlushExRW.progressItem(p -> p.partitionId(partitionId).partitionOffset(offset).latestOffset(latestOffset)); - return this; + mergedFlushExRW.kind(KafkaApi.CONSUMER.value()); + + return new KafkaMergedConsumerFlushExBuilder(); + } + + public 
KafkaFlushExBuilder build() + { + final KafkaMergedFlushExFW mergedFlushEx = mergedFlushExRW.build(); + flushExRO.wrap(writeBuffer, 0, mergedFlushEx.limit()); + return KafkaFlushExBuilder.this; } - public KafkaMergedFlushExBuilder progress( + public final class KafkaMergedFetchFlushExBuilder + { + private final KafkaMergedFetchFlushExFW.Builder mergedFetchFlushExRW = new KafkaMergedFetchFlushExFW.Builder(); + + private KafkaMergedFetchFlushExBuilder() + { + mergedFetchFlushExRW.wrap(writeBuffer, + KafkaFlushExFW.FIELD_OFFSET_MERGED + KafkaMergedFlushExFW.FIELD_OFFSET_FETCH, + writeBuffer.capacity()); + } + + public KafkaMergedFetchFlushExBuilder progress( + int partitionId, + long offset) + { + progress(partitionId, offset, DEFAULT_LATEST_OFFSET); + return this; + } + + public KafkaMergedFetchFlushExBuilder progress( + int partitionId, + long offset, + long latestOffset) + { + mergedFetchFlushExRW.progressItem(p -> + p.partitionId(partitionId) + .partitionOffset(offset) + .latestOffset(latestOffset)); + return this; + } + + public KafkaMergedFetchFlushExBuilder progress( int partitionId, long offset, long stableOffset, long latestOffset) - { - mergedFlushExRW.progressItem(p -> p + { + mergedFetchFlushExRW.progressItem(p -> p .partitionId(partitionId) .partitionOffset(offset) .stableOffset(stableOffset) .latestOffset(latestOffset)); - return this; - } + return this; + } - public KafkaMergedFlushExBuilder capabilities( - String capabilities) - { - mergedFlushExRW.capabilities(c -> c.set(KafkaCapabilities.valueOf(capabilities))); - return this; - } + public KafkaMergedFetchFlushExBuilder capabilities( + String capabilities) + { + mergedFetchFlushExRW.capabilities(c -> c.set(KafkaCapabilities.valueOf(capabilities))); + return this; + } - public KafkaFilterBuilder filter() - { - return new KafkaFilterBuilder<>() + public KafkaFilterBuilder filter() { + return new KafkaFilterBuilder<>() + { - @Override - protected KafkaMergedFlushExBuilder build( - KafkaFilterFW filter) 
+ @Override + protected KafkaMergedFetchFlushExBuilder build( + KafkaFilterFW filter) + { + mergedFetchFlushExRW.filtersItem(fb -> set(fb, filter)); + return KafkaFlushExBuilder.KafkaMergedFlushExBuilder.KafkaMergedFetchFlushExBuilder.this; + } + }; + } + + public KafkaMergedFetchFlushExBuilder partition( + int partitionId, + long partitionOffset) + { + partition(partitionId, partitionOffset, DEFAULT_LATEST_OFFSET); + return this; + } + + public KafkaMergedFetchFlushExBuilder partition( + int partitionId, + long partitionOffset, + long latestOffset) + { + mergedFetchFlushExRW.partition(p -> p + .partitionId(partitionId) + .partitionOffset(partitionOffset) + .latestOffset(latestOffset)); + return this; + } + + + public KafkaMergedFetchFlushExBuilder key( + String key) + { + if (key == null) { - mergedFlushExRW.filtersItem(fb -> set(fb, filter)); - return KafkaMergedFlushExBuilder.this; + mergedFetchFlushExRW.key(m -> m.length(-1) + .value((OctetsFW) null)); } - }; - } + else + { + keyRO.wrap(key.getBytes(UTF_8)); + mergedFetchFlushExRW.key(k -> k.length(keyRO.capacity()) + .value(keyRO, 0, keyRO.capacity())); + } + return this; + } - public KafkaMergedFlushExBuilder partition( - int partitionId, - long partitionOffset) - { - partition(partitionId, partitionOffset, DEFAULT_LATEST_OFFSET); - return this; + public KafkaFlushExBuilder build() + { + final KafkaMergedFetchFlushExFW mergedFetchFlushEx = mergedFetchFlushExRW.build(); + flushExRO.wrap(writeBuffer, 0, mergedFetchFlushExRW.limit()); + return KafkaFlushExBuilder.this; + } } - public KafkaMergedFlushExBuilder partition( - int partitionId, - long partitionOffset, - long latestOffset) + public final class KafkaMergedConsumerFlushExBuilder { - mergedFlushExRW.partition(p -> p - .partitionId(partitionId) - .partitionOffset(partitionOffset) - .latestOffset(latestOffset)); - return this; - } + private final KafkaMergedConsumerFlushExFW.Builder mergedConsumerFlushExRW = + new KafkaMergedConsumerFlushExFW.Builder(); + 
private KafkaMergedConsumerFlushExBuilder() + { + mergedConsumerFlushExRW.wrap(writeBuffer, + KafkaFlushExFW.FIELD_OFFSET_MERGED + KafkaMergedFlushExFW.FIELD_OFFSET_CONSUMER, + writeBuffer.capacity()); + } - public KafkaMergedFlushExBuilder key( - String key) - { - if (key == null) + public KafkaMergedConsumerFlushExBuilder partition( + int partitionId, + long partitionOffset) { - mergedFlushExRW.key(m -> m.length(-1) - .value((OctetsFW) null)); + partition(partitionId, partitionOffset, DEFAULT_LATEST_OFFSET); + return this; } - else + + public KafkaMergedConsumerFlushExBuilder partition( + int partitionId, + long partitionOffset, + long latestOffset) { - keyRO.wrap(key.getBytes(UTF_8)); - mergedFlushExRW.key(k -> k.length(keyRO.capacity()) - .value(keyRO, 0, keyRO.capacity())); + mergedConsumerFlushExRW.partition(p -> p + .partitionId(partitionId) + .partitionOffset(partitionOffset) + .latestOffset(latestOffset)); + return this; } - return this; - } - public KafkaFlushExBuilder build() - { - final KafkaMergedFlushExFW mergedFlushEx = mergedFlushExRW.build(); - flushExRO.wrap(writeBuffer, 0, mergedFlushEx.limit()); - return KafkaFlushExBuilder.this; + public KafkaFlushExBuilder build() + { + final KafkaMergedConsumerFlushExFW mergedConsumerFlushEx = mergedConsumerFlushExRW.build(); + flushExRO.wrap(writeBuffer, 0, mergedConsumerFlushExRW.limit()); + return KafkaFlushExBuilder.this; + } } } @@ -2082,9 +2311,9 @@ public KafkaFetchFlushExBuilder partition( long latestOffset) { fetchFlushExRW.partition(p -> p.partitionId(partitionId) - .partitionOffset(offset) - .stableOffset(stableOffset) - .latestOffset(latestOffset)); + .partitionOffset(offset) + .stableOffset(stableOffset) + .latestOffset(latestOffset)); return this; } @@ -2177,46 +2406,52 @@ public KafkaFlushExBuilder build() public final class KafkaGroupFlushExBuilder { - private final KafkaGroupFlushExFW.Builder groupFlushExRW = new KafkaGroupFlushExFW.Builder(); + private final MutableDirectBuffer memberBuffer 
= new UnsafeBuffer(new byte[1024 * 8]); + private final KafkaGroupFlushExFW.Builder flushGroupExRW = new KafkaGroupFlushExFW.Builder(); + private final Array32FW.Builder memberRW = + new Array32FW.Builder<>(new KafkaGroupMemberFW.Builder(), new KafkaGroupMemberFW()); private KafkaGroupFlushExBuilder() { - groupFlushExRW.wrap(writeBuffer, KafkaFlushExFW.FIELD_OFFSET_FETCH, writeBuffer.capacity()); + flushGroupExRW.wrap(writeBuffer, KafkaFlushExFW.FIELD_OFFSET_GROUP, writeBuffer.capacity()); + memberRW.wrap(memberBuffer, 0, memberBuffer.capacity()); } - public KafkaGroupFlushExBuilder partition( - int partitionId, - long partitionOffset) + public KafkaGroupFlushExBuilder leaderId( + String leaderId) { - partition(partitionId, partitionOffset, DEFAULT_LATEST_OFFSET); + flushGroupExRW.leaderId(leaderId); return this; } - public KafkaGroupFlushExBuilder partition( - int partitionId, - long partitionOffset, - long latestOffset) + public KafkaGroupFlushExBuilder memberId( + String memberId) { - partition(partitionId, partitionOffset, latestOffset, latestOffset); + flushGroupExRW.memberId(memberId); return this; } - public KafkaGroupFlushExBuilder partition( - int partitionId, - long offset, - long stableOffset, - long latestOffset) + public KafkaGroupFlushExBuilder members( + String memberId, + byte[] metadata) { - groupFlushExRW.partition(p -> p.partitionId(partitionId) - .partitionOffset(offset) - .stableOffset(stableOffset) - .latestOffset(latestOffset)); + memberRW.item(gm -> gm.id(memberId) + .metadataLen(metadata.length) + .metadata(md -> md.set(metadata))); + return this; + } + + public KafkaGroupFlushExBuilder members( + String memberId) + { + memberRW.item(gm -> gm.id(memberId)); return this; } public KafkaFlushExBuilder build() { - final KafkaGroupFlushExFW groupFlushEx = groupFlushExRW.build(); + flushGroupExRW.members(memberRW.build()); + final KafkaGroupFlushExFW groupFlushEx = flushGroupExRW.build(); flushExRO.wrap(writeBuffer, 0, groupFlushEx.limit()); 
return KafkaFlushExBuilder.this; } @@ -2301,15 +2536,6 @@ public KafkaProduceDataExMatcherBuilder produce() return matcherBuilder; } - public KafkaGroupDataExMatchBuilder group() - { - final KafkaGroupDataExMatchBuilder matcherBuilder = new KafkaGroupDataExMatchBuilder(); - - this.kind = KafkaApi.GROUP.value(); - this.caseMatcher = matcherBuilder::match; - return matcherBuilder; - } - public KafkaDataExMatcherBuilder typeId( int typeId) { @@ -3079,70 +3305,6 @@ private boolean matchFilters( return filters == null || filters == mergedDataEx.filters(); } } - - public final class KafkaGroupDataExMatchBuilder - { - private String16FW leaderId; - private String16FW memberId; - private Integer members; - - private KafkaGroupDataExMatchBuilder() - { - } - - public KafkaGroupDataExMatchBuilder leaderId( - String leaderId) - { - this.leaderId = new String16FW(leaderId); - return this; - } - - public KafkaGroupDataExMatchBuilder memberId( - String memberId) - { - this.memberId = new String16FW(memberId); - return this; - } - - public KafkaGroupDataExMatchBuilder members( - int members) - { - this.members = Integer.valueOf(members); - return this; - } - - public KafkaDataExMatcherBuilder build() - { - return KafkaDataExMatcherBuilder.this; - } - - private boolean match( - KafkaDataExFW dataEx) - { - final KafkaGroupDataExFW groupDataEx = dataEx.group(); - return matchLeaderId(groupDataEx) && - matchMemberId(groupDataEx) && - matchmembers(groupDataEx); - } - - private boolean matchLeaderId( - final KafkaGroupDataExFW groupDataEx) - { - return leaderId == null || leaderId.equals(groupDataEx.leaderId()); - } - - private boolean matchMemberId( - final KafkaGroupDataExFW groupDataEx) - { - return memberId == null || memberId.equals(groupDataEx.memberId()); - } - - private boolean matchmembers( - final KafkaGroupDataExFW groupDataEx) - { - return members != null && members == groupDataEx.members(); - } - } } public static final class KafkaFlushExMatcherBuilder @@ -3183,6 +3345,15 
@@ public KafkaProduceFlushExMatcherBuilder produce() return matcherBuilder; } + public KafkaGroupFlushExMatchBuilder group() + { + final KafkaGroupFlushExMatchBuilder matcherBuilder = new KafkaGroupFlushExMatchBuilder(); + + this.kind = KafkaApi.GROUP.value(); + this.caseMatcher = matcherBuilder::match; + return matcherBuilder; + } + public KafkaFlushExMatcherBuilder typeId( int typeId) { @@ -3306,7 +3477,6 @@ public KafkaFilterBuilder() { - @Override protected KafkaFlushExMatcherBuilder.KafkaFetchFlushExMatcherBuilder build( KafkaFilterFW filter) @@ -3348,162 +3518,192 @@ private boolean matchFilters( { return filtersRW == null || filtersRW.build().equals(fetchFlushEx.filters()); } - } public final class KafkaMergedFlushExMatcherBuilder { - private Array32FW.Builder progressRW; - private KafkaKeyFW.Builder keyRW; - private KafkaOffsetFW.Builder partitionRW; - - private Array32FW.Builder filtersRW; + KafkaMergedFetchFlushEx mergedFetchFlush; private KafkaMergedFlushExMatcherBuilder() { } - public KafkaMergedFlushExMatcherBuilder progress( - int partitionId, - long offset) + public boolean match( + KafkaFlushExFW kafkaFlushEx) { - progress(partitionId, offset, DEFAULT_LATEST_OFFSET); - return this; + boolean matched = false; + if (kafkaFlushEx.merged().kind() == KafkaApi.FETCH.value()) + { + matched = fetch().match(kafkaFlushEx); + } + return matched; } - public KafkaMergedFlushExMatcherBuilder progress( - int partitionId, - long offset, - long latestOffset) + public KafkaMergedFetchFlushEx fetch() { - if (progressRW == null) + if (mergedFetchFlush == null) { - this.progressRW = new Array32FW.Builder<>(new KafkaOffsetFW.Builder(), new KafkaOffsetFW()) - .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + mergedFetchFlush = new KafkaMergedFetchFlushEx(); } - progressRW.item(i -> i.partitionId(partitionId).partitionOffset(offset).latestOffset(latestOffset)); - return this; + return mergedFetchFlush; } - public KafkaMergedFlushExMatcherBuilder progress( - int 
partitionId, - long offset, - long stableOffset, - long latestOffset) + public final class KafkaMergedFetchFlushEx { - if (progressRW == null) + private Array32FW.Builder progressRW; + private KafkaKeyFW.Builder keyRW; + private KafkaOffsetFW.Builder partitionRW; + + private Array32FW.Builder filtersRW; + + private KafkaMergedFetchFlushEx() { - this.progressRW = new Array32FW.Builder<>(new KafkaOffsetFW.Builder(), new KafkaOffsetFW()) + } + + public KafkaMergedFetchFlushEx progress( + int partitionId, + long offset) + { + progress(partitionId, offset, DEFAULT_LATEST_OFFSET); + return this; + } + + public KafkaMergedFetchFlushEx progress( + int partitionId, + long offset, + long latestOffset) + { + if (progressRW == null) + { + this.progressRW = new Array32FW.Builder<>(new KafkaOffsetFW.Builder(), new KafkaOffsetFW()) .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + } + progressRW.item(i -> i.partitionId(partitionId).partitionOffset(offset).latestOffset(latestOffset)); + return this; } - progressRW.item(i -> i + + public KafkaMergedFetchFlushEx progress( + int partitionId, + long offset, + long stableOffset, + long latestOffset) + { + if (progressRW == null) + { + this.progressRW = new Array32FW.Builder<>(new KafkaOffsetFW.Builder(), new KafkaOffsetFW()) + .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + } + progressRW.item(i -> i .partitionId(partitionId) .partitionOffset(offset) .stableOffset(stableOffset) .latestOffset(latestOffset)); - return this; - } - - public KafkaMergedFlushExMatcherBuilder partition( - int partitionId, - long offset) - { - partition(partitionId, offset, DEFAULT_LATEST_OFFSET); - return this; - } - - public KafkaMergedFlushExMatcherBuilder partition( - int partitionId, - long offset, - long latestOffset) - { - assert partitionRW == null; - partitionRW = new KafkaOffsetFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + return this; + } - 
partitionRW.partitionId(partitionId).partitionOffset(offset).latestOffset(latestOffset); + public KafkaMergedFetchFlushEx partition( + int partitionId, + long offset) + { + partition(partitionId, offset, DEFAULT_LATEST_OFFSET); + return this; + } - return this; - } + public KafkaMergedFetchFlushEx partition( + int partitionId, + long offset, + long latestOffset) + { + assert partitionRW == null; + partitionRW = new KafkaOffsetFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); - public KafkaMergedFlushExMatcherBuilder key( - String key) - { - assert keyRW == null; - keyRW = new KafkaKeyFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + partitionRW.partitionId(partitionId).partitionOffset(offset).latestOffset(latestOffset); - if (key == null) - { - keyRW.length(-1) - .value((OctetsFW) null); + return this; } - else + + public KafkaMergedFetchFlushEx key( + String key) { - keyRO.wrap(key.getBytes(UTF_8)); - keyRW.length(keyRO.capacity()) - .value(keyRO, 0, keyRO.capacity()); - } + assert keyRW == null; + keyRW = new KafkaKeyFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); - return this; - } + if (key == null) + { + keyRW.length(-1) + .value((OctetsFW) null); + } + else + { + keyRO.wrap(key.getBytes(UTF_8)); + keyRW.length(keyRO.capacity()) + .value(keyRO, 0, keyRO.capacity()); + } - public KafkaFilterBuilder filter() - { - if (filtersRW == null) - { - filtersRW = new Array32FW.Builder<>(new KafkaFilterFW.Builder(), new KafkaFilterFW()) - .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + return this; } - return new KafkaFilterBuilder<>() + public KafkaFilterBuilder + + filter() { - - @Override - protected KafkaMergedFlushExMatcherBuilder build( - KafkaFilterFW filter) + if (filtersRW == null) { - filtersRW.item(fb -> set(fb, filter)); - return KafkaMergedFlushExMatcherBuilder.this; + filtersRW = new Array32FW.Builder<>(new KafkaFilterFW.Builder(), new KafkaFilterFW()) + .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); } - }; - } 
- public KafkaFlushExMatcherBuilder build() - { - return KafkaFlushExMatcherBuilder.this; - } + return new KafkaFilterBuilder<>() + { + @Override + protected KafkaFlushExMatcherBuilder.KafkaMergedFlushExMatcherBuilder.KafkaMergedFetchFlushEx + build( + KafkaFilterFW filter) + { + filtersRW.item(fb -> set(fb, filter)); + return KafkaMergedFetchFlushEx.this; + } + }; + } - private boolean match( - KafkaFlushExFW flushEx) - { - final KafkaMergedFlushExFW mergedFlushEx = flushEx.merged(); - return matchProgress(mergedFlushEx) && - matchKey(mergedFlushEx) && - matchPartition(mergedFlushEx) && - matchFilters(mergedFlushEx); - } + public KafkaFlushExMatcherBuilder build() + { + return KafkaFlushExMatcherBuilder.this; + } - private boolean matchProgress( - final KafkaMergedFlushExFW mergedFlushEx) - { - return progressRW == null || progressRW.build().equals(mergedFlushEx.progress()); - } + private boolean match( + KafkaFlushExFW flushEx) + { + final KafkaMergedFetchFlushExFW fetch = flushEx.merged().fetch(); + return matchProgress(fetch) && + matchKey(fetch) && + matchPartition(fetch) && + matchFilters(fetch); + } - private boolean matchPartition( - final KafkaMergedFlushExFW mergedFlushEx) - { - return partitionRW == null || partitionRW.build().equals(mergedFlushEx.partition()); - } + private boolean matchProgress( + final KafkaMergedFetchFlushExFW mergedFlush) + { + return progressRW == null || progressRW.build().equals(mergedFlush.progress()); + } - private boolean matchKey( - final KafkaMergedFlushExFW mergedFlushEx) - { - return keyRW == null || keyRW.build().equals(mergedFlushEx.key()); - } + private boolean matchPartition( + final KafkaMergedFetchFlushExFW mergedFlush) + { + return partitionRW == null || partitionRW.build().equals(mergedFlush.partition()); + } - private boolean matchFilters( - final KafkaMergedFlushExFW mergedFlushEx) - { - return filtersRW == null || filtersRW.build().equals(mergedFlushEx.filters()); + private boolean matchKey( + final 
KafkaMergedFetchFlushExFW mergedFlush) + { + return keyRW == null || keyRW.build().equals(mergedFlush.key()); + } + + private boolean matchFilters( + final KafkaMergedFetchFlushExFW mergedFlush) + { + return filtersRW == null || filtersRW.build().equals(mergedFlush.filters()); + } } } @@ -3583,6 +3783,77 @@ private boolean matchKey( return keyRW == null || keyRW.build().equals(produceFlushEx.key()); } } + + public final class KafkaGroupFlushExMatchBuilder + { + private String16FW leaderId; + private String16FW memberId; + private Array32FW.Builder membersRW; + + private KafkaGroupFlushExMatchBuilder() + { + } + + public KafkaGroupFlushExMatchBuilder leaderId( + String leaderId) + { + this.leaderId = new String16FW(leaderId); + return this; + } + + public KafkaGroupFlushExMatchBuilder memberId( + String memberId) + { + this.memberId = new String16FW(memberId); + return this; + } + + public KafkaGroupFlushExMatchBuilder members( + String memberId, + String metadata) + { + if (membersRW == null) + { + this.membersRW = new Array32FW.Builder<>(new KafkaGroupMemberFW.Builder(), new KafkaGroupMemberFW()) + .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + } + this.membersRW.item(m -> m.id(memberId).metadataLen(metadata.length()) + .metadata(md -> md.set(metadata.getBytes()))); + return this; + } + + public KafkaFlushExMatcherBuilder build() + { + return KafkaFlushExMatcherBuilder.this; + } + + private boolean match( + KafkaFlushExFW flushEx) + { + final KafkaGroupFlushExFW groupFlushEx = flushEx.group(); + return matchLeaderId(groupFlushEx) && + matchMemberId(groupFlushEx) && + matchMembers(groupFlushEx); + } + + private boolean matchLeaderId( + final KafkaGroupFlushExFW groupFLushEx) + { + return leaderId == null || leaderId.equals(groupFLushEx.leaderId()); + } + + private boolean matchMemberId( + final KafkaGroupFlushExFW groupFLushEx) + { + return memberId == null || memberId.equals(groupFLushEx.memberId()); + } + + private boolean matchMembers( + final 
KafkaGroupFlushExFW groupFLushEx) + { + return membersRW == null || membersRW.build().equals(groupFLushEx.members()); + } + } } public static final class KafkaBeginExMatcherBuilder @@ -3956,6 +4227,8 @@ public final class KafkaGroupBeginExMatcherBuilder private String16FW protocol; private int timeout; + private byte[] metadata; + private KafkaGroupBeginExMatcherBuilder() { } @@ -3981,6 +4254,13 @@ public KafkaGroupBeginExMatcherBuilder timeout( return this; } + public KafkaGroupBeginExMatcherBuilder metadata( + byte[] metadata) + { + this.metadata = metadata; + return this; + } + public KafkaBeginExMatcherBuilder build() { return KafkaBeginExMatcherBuilder.this; @@ -3993,7 +4273,8 @@ private boolean match( return matchGroupId(groupBeginEx) && matchGroupId(groupBeginEx) && matchProtocol(groupBeginEx) && - matchTimeout(groupBeginEx); + matchTimeout(groupBeginEx) && + matchMetadata(groupBeginEx); } private boolean matchGroupId( @@ -4013,6 +4294,13 @@ private boolean matchTimeout( { return timeout == 0 || timeout == groupBeginExFW.timeout(); } + + private boolean matchMetadata( + final KafkaGroupBeginExFW groupBeginExFW) + { + OctetsFW metadata = groupBeginExFW.metadata(); + return this.metadata == null || metadata.sizeof() == this.metadata.length; + } } public final class KafkaMergedBeginExMatcherBuilder diff --git a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl index 29b072619f..fb0078d524 100644 --- a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl +++ b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl @@ -195,7 +195,6 @@ scope kafka union KafkaDataEx switch (uint8) extends core::stream::Extension { case 252: kafka::stream::KafkaConsumerDataEx consumer; - case 253: kafka::stream::KafkaGroupDataEx group; case 255: kafka::stream::KafkaMergedDataEx merged; case 3: kafka::stream::KafkaMetaDataEx meta; case 8: 
kafka::stream::KafkaOffsetCommitDataEx offsetCommit; @@ -230,6 +229,7 @@ scope kafka string16 topic; string16 groupId = null; string16 consumerId = null; + int32 timeout = 0; KafkaOffset[] partitions; KafkaFilter[] filters; // ORed KafkaEvaluation evaluation = LAZY; @@ -251,7 +251,13 @@ scope kafka KafkaHeader[] headers; // INIT + FIN (produce), INIT only (fetch) } - struct KafkaMergedFlushEx + union KafkaMergedFlushEx switch (uint8) + { + case 252: kafka::stream::KafkaMergedConsumerFlushEx consumer; + case 1: kafka::stream::KafkaMergedFetchFlushEx fetch; + } + + struct KafkaMergedFetchFlushEx { KafkaOffset partition; KafkaOffset[] progress; @@ -260,9 +266,9 @@ scope kafka KafkaKey key; } - struct KafkaGroupFlushEx + struct KafkaMergedConsumerFlushEx { - KafkaOffset partition; + KafkaOffset partition; } struct KafkaMetaBeginEx @@ -343,41 +349,71 @@ scope kafka KafkaHeader[] headers; } + struct KafkaTopicPartition + { + int32 partitionId; + } + + struct KafkaGroupTopicMetadata + { + string16 topic; + KafkaTopicPartition[] partitions; + } + + struct KafkaGroupMemberMetadata + { + string16 consumerId; + KafkaGroupTopicMetadata[] topics; + } + struct KafkaGroupBeginEx { string16 groupId; string16 protocol; int32 timeout; + varint32 metadataLen; + octets[metadataLen] metadata = null; } - struct KafkaGroupDataEx + struct KafkaGroupMember { - string16 leaderId; - string16 memberId; - int32 members; + string16 id; + varint32 metadataLen; + octets[metadataLen] metadata = null; } - struct TopicPartition + struct KafkaGroupFlushEx { - int32 partitionId; + string16 leaderId; + string16 memberId; + KafkaGroupMember[] members; } struct KafkaConsumerBeginEx { string16 groupId; + string16 consumerId; + int32 timeout; string16 topic; - TopicPartition[] partitionIds; + KafkaTopicPartition[] partitionIds; + } + + struct KafkaConsumerAssignment + { + string16 consumerId; + KafkaTopicPartition[] partitions; } struct KafkaConsumerDataEx { - TopicPartition[] partitions; + 
KafkaTopicPartition[] partitions; + KafkaConsumerAssignment[] assignments; } struct KafkaOffsetFetchTopic { string16 topic; - TopicPartition[] partitions; + KafkaTopicPartition[] partitions; } struct KafkaOffsetFetchBeginEx @@ -409,4 +445,36 @@ scope kafka int64 partitionOffset; } } + + scope rebalance + { + struct TopicPartition + { + int32 partitionId; + } + + struct ConsumerAssignment + { + string16 consumerId; + TopicPartition[] partitions; + } + + struct TopicAssignment + { + string16 topic; + TopicPartition[] partitions; + ConsumerAssignment[] userdata; + } + + struct MemberAssignment + { + string16 memberId; + TopicAssignment[] assignments; + } + + struct PartitionIndex + { + int32 index; + } + } } diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt new file mode 100644 index 0000000000..900ec59160 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt @@ -0,0 +1,40 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .consumer() + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .topic("test") + .partition(0) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .consumer() + .partition(0) + .assignment("localhost:9092", 0) + .build() + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt new file mode 100644 index 0000000000..17e8dc42c5 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt @@ -0,0 +1,48 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .consumer() + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .topic("test") + .partition(0) + .build() + .build()} + +connected + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .consumer() + .partition(0) + .assignment("localhost:9092", 0) + .build() + .build()} + +write zilla:data.empty +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt index cb2deade38..cf59d81824 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt @@ -40,13 +40,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() write advise zilla:flush -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -read zilla:data.null +write zilla:data.empty +write flush + +read zilla:data.empty diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt index 
52b3df2fa1..7076315b97 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt @@ -45,12 +45,16 @@ write flush read advised zilla:flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} + +read zilla:data.empty + +write zilla:data.empty write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt index e1d426f7bc..ee170242ea 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt @@ -38,13 +38,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() .build() .build()} -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -read zilla:data.null +write zilla:data.empty +write flush + +read zilla:data.empty diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt index e294b4e29e..73e30d082e 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt @@ -43,12 +43,16 @@ write zilla:begin.ext ${kafka:beginEx() .build()} write flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} + +read zilla:data.empty + +write zilla:data.empty write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt new file mode 100644 index 0000000000..282b24d83b --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt @@ -0,0 +1,64 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(45000) + .metadata(kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test", 0) + .build()) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test", 0) + .build()) + .build() + .build()} + +write ${kafka:memberAssignment() + .member("memberId-1", "test", 0, "localhost:9092", 0) + .build()} +write flush + +read ${kafka:topicAssignment() + .topic("test", 0, "localhost:9092", 0) + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt new file mode 100644 index 0000000000..8367f6fc54 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt @@ -0,0 +1,69 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(45000) + .metadata(kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test", 0) + .build()) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test", 0) + .build()) + .build() + .build()} + +read ${kafka:memberAssignment() + .member("memberId-1", "test", 0, "localhost:9092", 0) + .build()} + +write ${kafka:topicAssignment() + .topic("test", 0, "localhost:9092", 0) + .build()} +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt index 0ad5b88eb7..be863d5e6f 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -38,15 +38,19 @@ read zilla:begin.ext ${kafka:matchBeginEx() .build() .build()} -read zilla:data.ext ${kafka:dataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -read zilla:data.null + +write zilla:data.empty +write flush + +read zilla:data.empty read notify ROUTED_BROKER_SERVER @@ -76,12 +80,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() .build() .build()} -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -read zilla:data.null + +write zilla:data.empty +write flush + +read zilla:data.empty diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt index 612219b422..63f3642b46 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -43,14 +43,18 @@ write zilla:begin.ext ${kafka:beginEx() .build()} write flush -write zilla:data.ext ${kafka:dataEx() +write 
advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} + +read zilla:data.empty + +write zilla:data.empty write flush read abort @@ -78,13 +82,16 @@ write zilla:begin.ext ${kafka:beginEx() .build()} write flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -write flush +read zilla:data.empty + +write zilla:data.empty +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt index 97fe944ccb..d53487e538 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt @@ -38,27 +38,36 @@ read zilla:begin.ext ${kafka:matchBeginEx() .build() .build()} -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -read zilla:data.null + +write zilla:data.empty +write flush + +read zilla:data.empty write advise zilla:flush -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(2) + .members("memberId-1") + .members("memberId-2") .build() .build()} -read zilla:data.null + +write 
zilla:data.empty +write flush + +read zilla:data.empty write close read closed diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt index 3cad0f6c66..ca3b929e93 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt @@ -43,27 +43,35 @@ write zilla:begin.ext ${kafka:beginEx() .build()} write flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} + +read zilla:data.empty + +write zilla:data.empty write flush read advised zilla:flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(2) + .members("memberId-1") + .members("memberId-2") .build() .build()} -write flush + +read zilla:data.empty + +write zilla:data.empty read closed write close diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt index d148031a57..a6a92ea2ad 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt @@ -38,15 +38,19 @@ read zilla:begin.ext ${kafka:beginEx() .build() .build()} -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -read zilla:data.null + +write zilla:data.empty +write flush + +read zilla:data.empty read notify ROUTED_BROKER_SERVER diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt index b2ffbdad10..09f08d8a9b 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt @@ -43,14 +43,18 @@ write zilla:begin.ext ${kafka:beginEx() .build()} write flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} + +read zilla:data.empty + +write zilla:data.empty write flush rejected diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt new file mode 100644 index 0000000000..6937ab1880 --- /dev/null +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt @@ -0,0 +1,65 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1") + .build() + .build()} + +write zilla:data.empty +write flush + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1") + .build() + .build()} + +write zilla:data.empty +write flush + +read zilla:data.empty diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt new file mode 100644 index 0000000000..5ca156ca20 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt @@ -0,0 +1,69 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1") + .build() + .build()} + +read zilla:data.empty + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1") + .build() + .build()} + +read zilla:data.empty + +write zilla:data.empty +write flush diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/client.rpt index 46b8645895..bee1c37b8a 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/client.rpt @@ -38,6 +38,7 @@ connected read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .progress(1, 2, 2, 2) .build() @@ -57,6 +58,7 @@ read "Hello, world #A5" write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("header3") diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/server.rpt index 131b4661c7..0662c289a2 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/server.rpt @@ -43,6 +43,7 @@ connected write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .progress(1, 2, 2, 2) .build() @@ -64,6 +65,7 @@ write flush read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .filter() .headers("header3") .sequence("one") diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/client.rpt index 65797fa317..95bb8dc375 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/client.rpt @@ -46,6 +46,7 @@ read "Hello, world #A1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/server.rpt index e6aef8b810..646925b42c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/server.rpt @@ -51,6 +51,7 @@ write "Hello, world #A1" write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} @@ -102,4 +103,4 @@ write zilla:data.ext ${kafka:dataEx() .header("header2", "value2") .build() .build()} -write "Hello, world #A4" \ No newline at end of file +write "Hello, world #A4" diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/client.rpt index 979c34e4c9..3f68ef3c90 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/client.rpt @@ -83,6 +83,7 @@ read notify RECEIVED_MESSAGE_B2 read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 1, 2) .progress(1, 3, 1, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/server.rpt index 0a10a9bbb4..edd7106c28 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/server.rpt @@ -92,6 +92,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 1, 2) .progress(1, 3, 1, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/client.rpt index be72e1a5a0..593b4fc303 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/client.rpt @@ -82,6 +82,7 @@ read notify RECEIVED_MESSAGE_B2 read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 2, 2) .progress(1, 3, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/server.rpt index 592b71500c..99dc1173f7 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/server.rpt @@ -91,6 +91,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 2, 2) .progress(1, 3, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/client.rpt index 2996614512..c178993714 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/client.rpt @@ -43,6 +43,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .progress(1, 2, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/server.rpt index 061d4156ab..bd995750f6 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/server.rpt @@ -49,6 +49,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .progress(1, 2, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/client.rpt index ab1d6d8a79..70956daee5 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/client.rpt @@ -83,6 +83,7 @@ read notify RECEIVED_MESSAGE_B2 read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() 
.progress(0, 3, 2, 2) .progress(1, 3, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/server.rpt index c19fa6f57c..66873f85e8 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/server.rpt @@ -92,6 +92,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 2, 2) .progress(1, 3, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/client.rpt index dea36332fb..1a4b4e8acb 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/client.rpt @@ -74,6 +74,7 @@ read "Hello, world #B2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 2, 2) .progress(1, 3, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/server.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/server.rpt index 6eb28b40cb..9b31a8b5de 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/server.rpt @@ -87,6 +87,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 2, 2) .progress(1, 3, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/client.rpt new file mode 100644 index 0000000000..dc96fdf72f --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/client.rpt @@ -0,0 +1,45 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("test") + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .partition(0, 1) + .partition(1, 1) + .partition(-1, 1) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .partition(0, 1, 2) + .progress(0, 2) + .build() + .build()} +read "Hello, world #A1" + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/server.rpt new file mode 100644 index 0000000000..cadddb5324 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/server.rpt @@ -0,0 +1,51 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("test") + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .partition(0, 1) + .partition(1, 1) + .partition(-1, 1) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(newTimestamp) + .partition(0, 1, 2) + .progress(0, 2) + .build() + .build()} +write "Hello, world #A1" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/client.rpt index f8c85d060b..ffb8003d11 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/client.rpt @@ -46,6 +46,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .key("key7") @@ -77,6 +78,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .key("key7") @@ -108,6 +110,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .key("key9") .capabilities("PRODUCE_ONLY") @@ -129,6 +132,7 @@ write flush write advise zilla:flush 
${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .key("key9") diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/server.rpt index 42d6dc55dd..c5e536ab1f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/server.rpt @@ -43,6 +43,7 @@ read "Hello, world #A1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .key("key7") .build() @@ -69,6 +70,7 @@ read "Hello, world #A2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .key("key7") .build() @@ -95,9 +97,10 @@ read "Hello, world #C1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() - .partition(-1, -1) - .key("key9") - .build() + .fetch() + .partition(-1, -1) + .key("key9") + .build() .build()} read zilla:data.ext ${kafka:matchDataEx() @@ -112,7 +115,8 @@ read "Hello, world #C2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() - .partition(-1, -1) - .key("key9") - .build() + .fetch() + .partition(-1, -1) + .key("key9") + .build() .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/client.rpt index 5277c6dc7a..d40c360bc3 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/client.rpt @@ -45,6 +45,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .build() @@ -73,6 +74,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .build() @@ -91,6 +93,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .build() @@ -119,6 +122,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/server.rpt index 1eca6de001..67ef157d76 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/server.rpt @@ -42,6 +42,7 @@ read "Hello, world #A1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() 
.partition(-1, -1) .build() .build()} @@ -66,6 +67,7 @@ read "Hi, world #C1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .build() .build()} @@ -81,6 +83,7 @@ read "Hello, world #A2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .build() .build()} @@ -105,6 +108,7 @@ read "Hi, world #C2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .build() .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/client.rpt index 7731d53481..7f787cba3f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/client.rpt @@ -45,6 +45,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(0, 1) .capabilities("PRODUCE_ONLY") .build() @@ -73,6 +74,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(0, 2) .capabilities("PRODUCE_ONLY") .build() @@ -101,6 +103,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(2, 1) .capabilities("PRODUCE_ONLY") .build() @@ -119,7 +122,8 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(2, 2) .capabilities("PRODUCE_ONLY") .build() - .build()} \ No newline at end of file + .build()} diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/server.rpt index 985213206a..96c3563c19 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/server.rpt @@ -42,6 +42,7 @@ read "Hello, world #A1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(0, 1) .build() .build()} @@ -65,6 +66,7 @@ read "Hello, world #A2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(0, 2) .build() .build()} @@ -88,6 +90,7 @@ read "Hi, world #C1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(2, 1) .build() .build()} @@ -103,6 +106,7 @@ read "Hi, world #C2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(2, 2) .build() .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt new file mode 100644 index 0000000000..e408dfe143 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt @@ -0,0 +1,170 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + 
.config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} + +read notify RECEIVED_CONFIG + +connect await RECEIVED_CONFIG + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .partition(1, 2) + .build() + .build()} + + +read notify PARTITION_COUNT_2 + +connect await PARTITION_COUNT_2 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .consumer() + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .topic("test") + .partition(0) + .partition(1) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .consumer() + .partition(0) + .assignment("localhost:9092", 0) + .build() + .build()} + +read notify RECEIVED_CONSUMER + +connect await RECEIVED_CONSUMER + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, -2) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, 1, 2) + .build() + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .fetch() + .partition(0, 1, 2) + .build() + .build()} +read "Hello, world #A1" + + diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt new file mode 100644 index 0000000000..a0f622b99c --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt @@ -0,0 +1,168 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} +write 
flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .partition(1, 2) + .build() + .build()} +write flush + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .consumer() + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .topic("test") + .partition(0) + .partition(1) + .build() + .build()} + +connected + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .consumer() + .partition(0) + .assignment("localhost:9092", 0) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, -2) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, 1, 2) + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .fetch() + .timestamp(newTimestamp) + .partition(0, 1, 2) + .build() + .build()} +write "Hello, world #A1" +write flush + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/client.rpt new file mode 100644 index 0000000000..e80abbc037 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/client.rpt @@ -0,0 +1,37 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-1") + .topic("test") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .partitionId(0) + .partitionOffset(1) + .build() + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/server.rpt new file mode 100644 index 0000000000..9c7c06b8c2 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/server.rpt @@ -0,0 +1,43 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-1") + .topic("test") + .build() + .build()} + +connected + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .partition(0) + .partition(1) + .build() + .build()} +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/client.rpt new file mode 100644 index 0000000000..ece4511733 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/client.rpt @@ -0,0 +1,36 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-1") + .topic("test", 0) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .topic("test", 0, 1) + .build() + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/server.rpt new file mode 100644 index 0000000000..cd7d3ce9d0 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/server.rpt @@ -0,0 +1,41 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-1") + .topic("test", 0) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .topic("test", 0, 1) + .build() + .build()} +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt index b31879fae3..f06928bf4b 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt @@ -66,9 +66,51 @@ read 35 # size write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + 
[0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt index c8ee99cba2..1071dc79f8 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt @@ -66,6 +66,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt index 661394f51f..b154dda2f3 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt @@ -49,9 +49,51 @@ read 35 # size write close read abort -read notify ROUTED_BROKER_SERVER_FIRST +read notify ROUTED_CLUSTER_SERVER_FIRST -connect await ROUTED_BROKER_SERVER_FIRST +connect await ROUTED_CLUSTER_SERVER_FIRST + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" @@ -117,9 +159,9 @@ read 35 # size 
write close read abort -read notify ROUTED_BROKER_SERVER_THIRD +read notify ROUTED_CLUSTER_SERVER_SECOND -connect await ROUTED_BROKER_SERVER_THIRD +connect await ROUTED_CLUSTER_SERVER_SECOND "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" @@ -127,6 +169,49 @@ connect await ROUTED_BROKER_SERVER_THIRD connected +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER_SECOND + +connect await ROUTED_DESCRIBE_SERVER_SECOND + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + + +connected + write 105 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt index 0501e11c66..89a6646f74 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 @@ -105,6 +141,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt index 3ebf38ef03..205d723a8e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt @@ -42,16 +42,58 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt index dd66906ce9..e5781e833c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt @@ -38,9 +38,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port read closed write aborted @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt index 4fece6bbfb..3ffa6e2ca3 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -42,16 +42,58 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER_FIRST -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER_FIRST + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER_FIRST + +connect await ROUTED_DESCRIBE_SERVER_FIRST "zilla://streams/net0" option zilla:window ${networkConnectWindow} option 
zilla:transmission "duplex" @@ -131,16 +173,59 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port write close read abort -read notify ROUTED_BROKER_SERVER_THIRD +read notify ROUTED_CLUSTER_SERVER_SECOND -connect await ROUTED_BROKER_SERVER_THIRD "zilla://streams/net0" +connect await ROUTED_CLUSTER_SERVER_SECOND + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER_SECOND + +connect await ROUTED_DESCRIBE_SERVER_SECOND + "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" option zilla:byteorder "network" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt index 62dd9d1a4f..353e127383 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -38,9 +38,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port read closed write aborted @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:newRequestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${newRequestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 @@ -57,7 +93,7 @@ read 105 # size 4s "test" # consumer group 30000 # session timeout 4000 # rebalance timeout - 0s # consumer group member + 0s # consumer group member 42s [0..42] # group instance id 8s "consumer" # protocol type 1 # group protocol @@ -116,9 +152,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port read closed write aborted @@ -127,6 +163,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + 
(int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 115 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt index d395166ffd..094ebb3212 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt @@ -42,16 +42,16 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" @@ -59,7 +59,49 @@ connect await 
ROUTED_BROKER_SERVER connected -write 105 # size +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 105 # size 11s # join group 5s # v5 ${newRequestId} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt index cf3f407a26..614a5cd0b6 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt @@ -38,9 +38,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s 
"localhost" # host + 9092 # port read closed write aborted @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt index 2bfabe1123..ed4521242e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt @@ -42,16 +42,58 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window 
${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt index 099f0a0a68..788be98c4c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt @@ -38,9 +38,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port read closed write aborted @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe 
configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt index c73a41889a..3f523d8122 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt @@ -42,16 +42,58 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 #port + 0 # coordinator node + 9s "localhost" # host + 9092 #port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # 
v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt index 75e53e1bbd..4c1030709a 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt @@ -38,9 +38,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port read closed write aborted @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s 
"group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 102 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt index 6dea7afae8..1e7fa59c00 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt @@ -42,16 +42,58 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name 
+ 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt index e3a812d391..90912d4823 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt @@ -38,9 +38,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port read closed write aborted @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" 
nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt index 6f082da9a8..54c9960f67 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt @@ -71,16 +71,87 @@ read 35 # size 0 #throttle time 0s #no error 4s "none" #error message none - 1 #coordinator node - 9s "localhost" #host - 9092 #port + 0 #coordinator node + 9s "localhost" #host + 9092 #port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 17 # size + 17s # sasl.handshake + 1s # v1 + ${newRequestId} + -1s # no client id + 5s "PLAIN" # mechanism + +read 17 # size + ${newRequestId} + 0s # no error + 1 # mechanisms + 5s "PLAIN" # PLAIN + +write 32 # size + 36s # sasl.authenticate + 1s # v1 + ${newRequestId} + -1s # no client id + 18 + [0x00] "username" # authentication bytes + [0x00] "password" + +read 20 # size + ${newRequestId} + 0s # no error + -1 + 
-1s # authentication bytes + 0L # session lifetime + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt index 44d6e465b8..3b7326fcfb 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt @@ -67,13 +67,79 @@ write 35 # size 0 #throttle time 0s #no error 4s "none" #error message none - 1 #coordinator node - 9s "localhost" #host - 9092 #port + 0 #coordinator node + 9s "localhost" #host + 9092 #port read closed write aborted +accepted + +connected + +read 17 # size + 17s # sasl.handshake + 1s # v1 + (int:requestId) + -1s # no client id + 5s "PLAIN" # mechanism + +write 17 # size 
+ ${requestId} + 0s # no error + 1 # mechanisms + 5s "PLAIN" # PLAIN + +read 32 # size + 36s # sasl.authenticate + 1s # v1 + (int:requestId) + -1s # no client id + 18 + [0x00] "username" # authentication bytes + [0x00] "password" + +write 20 # size + ${requestId} + 0s # no error + -1 + -1s # authentication bytes + 0L # session lifetime + +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + + accepted connected diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/client.rpt new file mode 100644 index 0000000000..11f8fe8d16 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/client.rpt @@ -0,0 +1,49 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkConnectWindow 8192 + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 38 # size + 9s # offset fetch + 0s # v0 + ${newRequestId} + -1s # no client id + 8s "client-1" # group id + 1 # topics + 4s "test" # "test" topic + 1 # partitions + 0 # partition + +read 30 # size + 1 # topics + 4s "test" # "test" topic + 1 # partitions + 0 # partition index + 1L # committed offset + -1s # metadata + 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/server.rpt new file mode 100644 index 0000000000..fc324de30e --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/server.rpt @@ -0,0 +1,46 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkAcceptWindow 8192 + +accept "zilla://streams/net0" + option zilla:window ${networkAcceptWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 38 # size + 9s # offset fetch + 0s # v0 + (int:newRequestId) + -1s # no client id + 8s "client-1" # group id + 1 # topics + 4s "test" # "test" topic + 1 # partitions + 0 # partition + +write 30 # size + 1 # topics + 4s "test" # "test" topic + 1 # partitions + 0 # partition index + 1L # committed offset + -1s # metadata + 0s # no error diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java index 6be4ef9041..7a1c388c82 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java @@ -56,6 +56,8 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.KafkaTransactionResult; import io.aklivity.zilla.specs.binding.kafka.internal.types.KafkaValueMatchFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.rebalance.MemberAssignmentFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.rebalance.TopicAssignmentFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaApi; import 
io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaBootstrapBeginExFW; @@ -69,8 +71,8 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFetchFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; -import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupFlushExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupMemberMetadataFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedFlushExFW; @@ -98,6 +100,55 @@ public void setUp() throws Exception ctx = new ExpressionContext(); } + @Test + public void shouldGenerateMemberMetadata() + { + byte[] build = KafkaFunctions.memberMetadata() + .consumerId("localhost:9092") + .topic("test", 0) + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaGroupMemberMetadataFW memberMetadata = + new KafkaGroupMemberMetadataFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals("localhost:9092", memberMetadata.consumerId().asString()); + } + + @Test + public void shouldGenerateMemberAssignment() + { + byte[] build = KafkaFunctions.memberAssignment() + .member("memberId-1", "test", 0, "localhost:9092", 0) + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + Array32FW assignments = + new Array32FW<>(new MemberAssignmentFW()).wrap(buffer, 0, buffer.capacity()); + + assignments.forEach(a -> + { + assertEquals("memberId-1", a.memberId().asString()); + }); + } + + @Test + public void shouldGenerateTopicAssignment() + { + byte[] 
build = KafkaFunctions.topicAssignment() + .topic("test", 0, "localhost:9092", 0) + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + Array32FW topics = + new Array32FW<>(new TopicAssignmentFW()).wrap(buffer, 0, buffer.capacity()); + + topics.forEach(t -> + { + assertEquals("test", t.topic().asString()); + }); + } + @Test public void shouldGenerateBootstrapBeginExtension() { @@ -880,11 +931,12 @@ public void shouldGenerateMergedDataExtensionWithNullKeyAndNullByteArrayHeaderVa } @Test - public void shouldGenerateMergedFlushExtension() + public void shouldGenerateMergedFetchFlushExtension() { byte[] build = KafkaFunctions.flushEx() .typeId(0x01) .merged() + .fetch() .partition(1, 2) .capabilities("PRODUCE_AND_FETCH") .progress(0, 1L) @@ -904,26 +956,26 @@ public void shouldGenerateMergedFlushExtension() final KafkaMergedFlushExFW mergedFlushEx = flushEx.merged(); final MutableInteger partitionsCount = new MutableInteger(); - mergedFlushEx.progress().forEach(f -> partitionsCount.value++); + mergedFlushEx.fetch().progress().forEach(f -> partitionsCount.value++); assertEquals(1, partitionsCount.value); - assertNotNull(mergedFlushEx.progress() + assertNotNull(mergedFlushEx.fetch().progress() .matchFirst(p -> p.partitionId() == 0 && p.partitionOffset() == 1L)); - assertEquals(mergedFlushEx.key().value().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)), "key"); - assertEquals(mergedFlushEx.partition().partitionId(), 1); - assertEquals(mergedFlushEx.partition().partitionOffset(), 2); + assertEquals(mergedFlushEx.fetch().key().value().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)), "key"); + assertEquals(mergedFlushEx.fetch().partition().partitionId(), 1); + assertEquals(mergedFlushEx.fetch().partition().partitionOffset(), 2); final MutableInteger filterCount = new MutableInteger(); - mergedFlushEx.filters().forEach(f -> filterCount.value++); + mergedFlushEx.fetch().filters().forEach(f -> filterCount.value++); assertEquals(2, 
filterCount.value); - assertNotNull(mergedFlushEx.filters() + assertNotNull(mergedFlushEx.fetch().filters() .matchFirst(f -> f.conditions() .matchFirst(c -> c.kind() == KEY.value() && "match".equals(c.key() .value() .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)))) != null)); - assertNotNull(mergedFlushEx.filters() + assertNotNull(mergedFlushEx.fetch().filters() .matchFirst(f -> f.conditions() .matchFirst(c -> c.kind() == HEADER.value() && "name".equals(c.header().name() @@ -933,11 +985,12 @@ public void shouldGenerateMergedFlushExtension() } @Test - public void shouldGenerateMergedFlushExtensionWithStableOffset() + public void shouldGenerateMergedFetchFlushExtensionWithStableOffset() { byte[] build = KafkaFunctions.flushEx() .typeId(0x01) .merged() + .fetch() .partition(0, 1L, 1L) .capabilities("PRODUCE_AND_FETCH") .progress(0, 1L, 1L, 1L) @@ -957,29 +1010,29 @@ public void shouldGenerateMergedFlushExtensionWithStableOffset() final KafkaMergedFlushExFW mergedFlushEx = flushEx.merged(); final MutableInteger partitionsCount = new MutableInteger(); - mergedFlushEx.progress().forEach(f -> partitionsCount.value++); + mergedFlushEx.fetch().progress().forEach(f -> partitionsCount.value++); assertEquals(1, partitionsCount.value); - assertEquals(mergedFlushEx.partition().partitionId(), 0); - assertEquals(mergedFlushEx.partition().partitionOffset(), 1L); - assertEquals(mergedFlushEx.partition().latestOffset(), 1L); + assertEquals(mergedFlushEx.fetch().partition().partitionId(), 0); + assertEquals(mergedFlushEx.fetch().partition().partitionOffset(), 1L); + assertEquals(mergedFlushEx.fetch().partition().latestOffset(), 1L); - assertNotNull(mergedFlushEx.progress() + assertNotNull(mergedFlushEx.fetch().progress() .matchFirst(p -> p.partitionId() == 0 && p.partitionOffset() == 1L && p.stableOffset() == 1L && p.latestOffset() == 1L)); final MutableInteger filterCount = new MutableInteger(); - mergedFlushEx.filters().forEach(f -> filterCount.value++); + 
mergedFlushEx.fetch().filters().forEach(f -> filterCount.value++); assertEquals(2, filterCount.value); - assertNotNull(mergedFlushEx.filters() + assertNotNull(mergedFlushEx.fetch().filters() .matchFirst(f -> f.conditions() .matchFirst(c -> c.kind() == KEY.value() && "match".equals(c.key() .value() .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)))) != null)); - assertNotNull(mergedFlushEx.filters() + assertNotNull(mergedFlushEx.fetch().filters() .matchFirst(f -> f.conditions() .matchFirst(c -> c.kind() == HEADER.value() && "name".equals(c.header().name() @@ -988,6 +1041,27 @@ public void shouldGenerateMergedFlushExtensionWithStableOffset() .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)))) != null)); } + @Test + public void shouldGenerateMergedConsumerFlushExtension() + { + byte[] build = KafkaFunctions.flushEx() + .typeId(0x01) + .merged() + .consumer() + .partition(1, 2) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaFlushExFW flushEx = new KafkaFlushExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x1, flushEx.typeId()); + + final KafkaMergedFlushExFW mergedFlushEx = flushEx.merged(); + + assertEquals(mergedFlushEx.consumer().partition().partitionId(), 1); + assertEquals(mergedFlushEx.consumer().partition().partitionOffset(), 2); + } + @Test public void shouldMatchMergedDataExtension() throws Exception { @@ -2119,7 +2193,10 @@ public void shouldGenerateGroupFlushExtension() byte[] build = KafkaFunctions.flushEx() .typeId(0x01) .group() - .partition(0, 1L) + .leaderId("consumer-1") + .memberId("consumer-2") + .members("memberId-1", "test".getBytes()) + .members("memberId-2", "test".getBytes()) .build() .build(); @@ -2128,9 +2205,36 @@ public void shouldGenerateGroupFlushExtension() assertEquals(0x01, flushEx.typeId()); final KafkaGroupFlushExFW groupFlushEx = flushEx.group(); - final KafkaOffsetFW partition = groupFlushEx.partition(); - assertEquals(0, partition.partitionId()); - assertEquals(1L, 
partition.partitionOffset()); + final String leaderId = groupFlushEx.leaderId().asString(); + final String memberId = groupFlushEx.memberId().asString(); + assertEquals("consumer-1", leaderId); + assertEquals("consumer-2", memberId); + assertEquals(2, groupFlushEx.members().fieldCount()); + } + + @Test + public void shouldGenerateGroupFlushExtensionWithEmptyMetadata() + { + byte[] build = KafkaFunctions.flushEx() + .typeId(0x01) + .group() + .leaderId("consumer-1") + .memberId("consumer-2") + .members("memberId-1") + .members("memberId-2") + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaFlushExFW flushEx = new KafkaFlushExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x01, flushEx.typeId()); + + final KafkaGroupFlushExFW groupFlushEx = flushEx.group(); + final String leaderId = groupFlushEx.leaderId().asString(); + final String memberId = groupFlushEx.memberId().asString(); + assertEquals("consumer-1", leaderId); + assertEquals("consumer-2", memberId); + assertEquals(2, groupFlushEx.members().fieldCount()); } @Test @@ -3818,6 +3922,30 @@ public void shouldGenerateMergedBeginExtensionWithHeadersFilter() @Test public void shouldGenerateGroupBeginExtension() + { + byte[] build = KafkaFunctions.beginEx() + .typeId(0x01) + .group() + .groupId("test") + .protocol("roundrobin") + .timeout(10) + .metadata("test".getBytes()) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaBeginExFW beginEx = new KafkaBeginExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x01, beginEx.typeId()); + assertEquals(KafkaApi.GROUP.value(), beginEx.kind()); + + final KafkaGroupBeginExFW groupBeginEx = beginEx.group(); + assertEquals("test", groupBeginEx.groupId().asString()); + assertEquals("roundrobin", groupBeginEx.protocol().asString()); + assertEquals(10, groupBeginEx.timeout()); + } + + @Test + public void shouldGenerateGroupBeginWithEmptyMetadataExtension() { byte[] build = KafkaFunctions.beginEx() 
.typeId(0x01) @@ -3844,11 +3972,13 @@ public void shouldGenerateConsumerBeginExtension() { byte[] build = KafkaFunctions.beginEx() .typeId(0x01) - .consumer() - .groupId("test") - .topic("topic") - .partition(1) - .build() + .consumer() + .groupId("test") + .consumerId("consumer-1") + .timeout(10000) + .topic("topic") + .partition(0) + .build() .build(); DirectBuffer buffer = new UnsafeBuffer(build); @@ -3924,52 +4054,33 @@ public void shouldMatchGroupBeginExtension() throws Exception .group(f -> f .groupId("test") .protocol("roundrobin") - .timeout(10)) + .timeout(10) + .metadataLen("test".length()) + .metadata(m -> m.set("test".getBytes()))) .build(); assertNotNull(matcher.match(byteBuf)); } - @Test - public void shouldGenerateGroupDataExtension() - { - byte[] build = KafkaFunctions.dataEx() - .typeId(0x01) - .group() - .leaderId("test1") - .memberId("test2") - .members(2) - .build() - .build(); - - DirectBuffer buffer = new UnsafeBuffer(build); - KafkaDataExFW dataEx = new KafkaDataExFW().wrap(buffer, 0, buffer.capacity()); - assertEquals(0x01, dataEx.typeId()); - assertEquals(KafkaApi.GROUP.value(), dataEx.kind()); - - final KafkaGroupDataExFW groupDataEx = dataEx.group(); - assertEquals("test1", groupDataEx.leaderId().asString()); - assertEquals("test2", groupDataEx.memberId().asString()); - assertTrue(groupDataEx.members() == 2); - } - @Test public void shouldGenerateConsumerDataExtension() { byte[] build = KafkaFunctions.dataEx() - .typeId(0x01) + .typeId(0x03) .consumer() .partition(0) + .assignment("localhost:9092", 0) .build() .build(); DirectBuffer buffer = new UnsafeBuffer(build); KafkaDataExFW dataEx = new KafkaDataExFW().wrap(buffer, 0, buffer.capacity()); - assertEquals(0x01, dataEx.typeId()); + assertEquals(0x03, dataEx.typeId()); assertEquals(KafkaApi.CONSUMER.value(), dataEx.kind()); final KafkaConsumerDataExFW consumerDataEx = dataEx.consumer(); assertTrue(consumerDataEx.partitions().fieldCount() == 1); + 
assertTrue(consumerDataEx.assignments().fieldCount() == 1); } @Test @@ -3978,7 +4089,7 @@ public void shouldGenerateOffsetFetchDataExtension() byte[] build = KafkaFunctions.dataEx() .typeId(0x01) .offsetFetch() - .topic("test", 0, 1L, 2L) + .topic("test", 0, 1L) .build() .build(); @@ -3990,8 +4101,8 @@ public void shouldGenerateOffsetFetchDataExtension() final KafkaOffsetFetchDataExFW offsetFetchDataEx = dataEx.offsetFetch(); KafkaOffsetFW offset = offsetFetchDataEx.topic().offsets().matchFirst(o -> o.partitionId() == 0); assertEquals("test", offsetFetchDataEx.topic().topic().asString()); - assertEquals(1L, offset.stableOffset()); - assertEquals(2L, offset.latestOffset()); + assertEquals(0, offset.partitionId()); + assertEquals(1L, offset.partitionOffset()); } @Test @@ -4015,32 +4126,6 @@ public void shouldGenerateOffsetCommitDataExtension() assertEquals(1L, offsetCommitDataEx.partitionOffset()); } - @Test - public void shouldMatchGroupDataExtension() throws Exception - { - BytesMatcher matcher = KafkaFunctions.matchDataEx() - .typeId(0x01) - .group() - .leaderId("test1") - .memberId("test2") - .members(2) - .build() - .build(); - - ByteBuffer byteBuf = ByteBuffer.allocate(1024); - - new KafkaDataExFW.Builder() - .wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) - .typeId(0x01) - .group(f -> f - .leaderId("test1") - .memberId("test2") - .members(2)) - .build(); - - assertNotNull(matcher.match(byteBuf)); - } - @Test public void shouldInvokeLength() throws Exception { @@ -4301,21 +4386,23 @@ public void shouldMatchMergedFlushExtension() throws Exception BytesMatcher matcher = KafkaFunctions.matchFlushEx() .typeId(0x01) .merged() - .partition(1, 2) - .progress(0, 1L) - .key("key") - .build() + .fetch() + .partition(1, 2) + .progress(0, 1L) + .key("key") + .build() .build(); ByteBuffer byteBuf = ByteBuffer.allocate(1024); new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) - .merged(f -> f.partition(p -> 
p.partitionId(1).partitionOffset(2)) + .merged(f -> f + .fetch(m -> m.partition(p -> p.partitionId(1).partitionOffset(2)) .progressItem(p -> p .partitionId(0) .partitionOffset(1L)) - .key(k -> k.length(3).value(v -> v.set("key".getBytes(UTF_8))))) + .key(k -> k.length(3).value(v -> v.set("key".getBytes(UTF_8)))))) .build(); assertNotNull(matcher.match(byteBuf)); @@ -4327,6 +4414,7 @@ public void shouldMatchMergedFlushExtensionWithLatestOffset() throws Exception BytesMatcher matcher = KafkaFunctions.matchFlushEx() .typeId(0x01) .merged() + .fetch() .partition(0, 1L, 1L) .progress(0, 1L, 1L) .build() @@ -4337,12 +4425,12 @@ public void shouldMatchMergedFlushExtensionWithLatestOffset() throws Exception new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) - .merged(f -> - f.partition(p -> p.partitionId(0).partitionOffset(1L).latestOffset(1L)) + .merged(f -> f + .fetch(m -> m.partition(p -> p.partitionId(0).partitionOffset(1L).latestOffset(1L)) .progressItem(p -> p .partitionId(0) .partitionOffset(1L) - .latestOffset(1L))) + .latestOffset(1L)))) .build(); assertNotNull(matcher.match(byteBuf)); @@ -4359,9 +4447,11 @@ public void shouldMatchMergedFlushExtensionTypeId() throws Exception new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) - .merged(f -> f.progressItem(p -> p - .partitionId(0) - .partitionOffset(1L))) + .merged(f -> f + .fetch(m -> + m.progressItem(p -> p + .partitionId(0) + .partitionOffset(1L)))) .build(); assertNotNull(matcher.match(byteBuf)); @@ -4372,6 +4462,7 @@ public void shouldMatchMergedFlushExtensionProgress() throws Exception { BytesMatcher matcher = KafkaFunctions.matchFlushEx() .merged() + .fetch() .progress(0, 1L, 1L, 1L) .build() .build(); @@ -4380,11 +4471,12 @@ public void shouldMatchMergedFlushExtensionProgress() throws Exception new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) - .merged(f -> 
f.progressItem(p -> p - .partitionId(0) - .partitionOffset(1L) - .stableOffset(1L) - .latestOffset(1L))) + .merged(f -> f + .fetch(m -> m.progressItem(p -> p + .partitionId(0) + .partitionOffset(1L) + .stableOffset(1L) + .latestOffset(1L)))) .build(); assertNotNull(matcher.match(byteBuf)); @@ -4394,12 +4486,14 @@ public void shouldMatchMergedFlushExtensionProgress() throws Exception public void shouldMatchMergedFlushExtensionFilters() throws Exception { BytesMatcher matcher = KafkaFunctions.matchFlushEx() + .typeId(0x01) .merged() - .filter() - .key("key") - .header("name", "value") + .fetch() + .filter() + .key("key") + .header("name", "value") + .build() .build() - .build() .build(); ByteBuffer byteBuf = ByteBuffer.allocate(1024); @@ -4408,7 +4502,7 @@ public void shouldMatchMergedFlushExtensionFilters() throws Exception .wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) .merged(f -> f - .filtersItem(i -> i + .fetch(m -> m.filtersItem(i -> i .conditionsItem(c -> c .key(k -> k .length(3) @@ -4419,6 +4513,7 @@ public void shouldMatchMergedFlushExtensionFilters() throws Exception .name(n -> n.set("name".getBytes(UTF_8))) .valueLen(5) .value(v -> v.set("value".getBytes(UTF_8))))))) + ) .build(); assertNotNull(matcher.match(byteBuf)); @@ -4435,9 +4530,10 @@ public void shouldNotMatchMergedFlushExtensionTypeId() throws Exception new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) - .merged(f -> f.progressItem(p -> p - .partitionId(0) - .partitionOffset(1L))) + .merged(f -> f + .fetch(m -> m.progressItem(p -> p + .partitionId(0) + .partitionOffset(1L)))) .build(); matcher.match(byteBuf); @@ -4449,17 +4545,19 @@ public void shouldNotMatchMergedFlushExtensionProgress() throws Exception BytesMatcher matcher = KafkaFunctions.matchFlushEx() .typeId(0x01) .merged() - .progress(0, 2L) - .build() + .fetch() + .progress(0, 2L) + .build() .build(); ByteBuffer byteBuf = ByteBuffer.allocate(1024); new 
KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) - .merged(f -> f.progressItem(p -> p + .merged(f -> f + .fetch(m -> m.progressItem(p -> p .partitionId(0) - .partitionOffset(1L))) + .partitionOffset(1L)))) .build(); matcher.match(byteBuf); @@ -4694,6 +4792,32 @@ public void shouldNotMatchFetchFlushExtensionWithStableOffset() throws Exception matcher.match(byteBuf); } + @Test + public void shouldMatchGroupFlushExtension() throws Exception + { + BytesMatcher matcher = KafkaFunctions.matchFlushEx() + .typeId(0x01) + .group() + .leaderId("memberId-1") + .memberId("memberId-2") + .members("memberId-1", "test") + .build() + .build(); + + ByteBuffer byteBuf = ByteBuffer.allocate(1024); + + new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) + .typeId(0x01) + .group(f -> f.leaderId("memberId-1").memberId("memberId-2"). + members(m -> m.item(i -> + i.id("memberId-1") + .metadataLen("test".length()).metadata(o -> o.set("test".getBytes()))))) + .build(); + + assertNotNull(matcher.match(byteBuf)); + } + + @Test(expected = Exception.class) public void shouldNotMatchFetchFlushExtensionWithLatestOffset() throws Exception { diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ConsumerIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ConsumerIT.java new file mode 100644 index 0000000000..8cf378a166 --- /dev/null +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ConsumerIT.java @@ -0,0 +1,47 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.specs.binding.kafka.streams.application; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +public class ConsumerIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/consumer"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + @Rule + public final TestRule chain = outerRule(k3po).around(timeout); + + @Test + @Specification({ + "${app}/partition.assignment/client", + "${app}/partition.assignment/server"}) + public void shouldAssignPartition() throws Exception + { + k3po.finish(); + } +} diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java index ae9738a9b4..416825c4db 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java @@ -26,7 +26,6 @@ import org.kaazing.k3po.junit.annotation.Specification; import 
org.kaazing.k3po.junit.rules.K3poRule; - public class GroupIT { private final K3poRule k3po = new K3poRule() @@ -90,4 +89,22 @@ public void shouldIgnoreHeartbeatBeforeHandshakeComplete() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${app}/rebalance.sync.group/client", + "${app}/rebalance.sync.group/server"}) + public void shouldHandleRebalanceSyncGroup() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/partition.assignment/client", + "${app}/partition.assignment/server"}) + public void shouldAssignGroupPartition() throws Exception + { + k3po.finish(); + } } diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java index a6fd7859e0..e019773402 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java @@ -647,4 +647,22 @@ public void shouldFetchMergedMessagesWithIsolationReadCommitted() throws Excepti { k3po.finish(); } + + @Test + @Specification({ + "${app}/merged.group.fetch.message.value/client", + "${app}/merged.group.fetch.message.value/server"}) + public void shouldFetchGroupMergedMessage() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/unmerged.group.fetch.message.value/client", + "${app}/unmerged.group.fetch.message.value/server"}) + public void shouldFetchGroupUnmergedMessage() throws Exception + { + k3po.finish(); + } } diff --git a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/client.rpt 
b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/client.rpt index a9b8af7a00..7d6a3b1061 100644 --- a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/client.rpt +++ b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/client.rpt @@ -57,6 +57,7 @@ read "Hello, again" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/server.rpt b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/server.rpt index 085878d37b..f2e0624691 100644 --- a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/server.rpt +++ b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/server.rpt @@ -63,6 +63,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/client.rpt b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/client.rpt index aceeba26e0..a26e271b43 100644 --- a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/client.rpt +++ 
b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/client.rpt @@ -53,6 +53,7 @@ read "Hello, again" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/server.rpt b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/server.rpt index 999c278a0c..1bf51d9003 100644 --- a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/server.rpt +++ b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/server.rpt @@ -59,6 +59,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/client.rpt b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/client.rpt index 7c455f08a9..3cf2256370 100644 --- a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/client.rpt +++ b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/client.rpt @@ -55,6 +55,7 @@ read "Hello, again" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git 
a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/server.rpt b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/server.rpt index 889af16b13..ddfd07e530 100644 --- a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/server.rpt +++ b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/server.rpt @@ -61,6 +61,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} From 01c3ac6924667853ce8d5713266d30781c41c1ae Mon Sep 17 00:00:00 2001 From: bmaidics Date: Tue, 5 Sep 2023 01:21:38 +0200 Subject: [PATCH 076/115] Session expiry (#387) --- .../client.rpt | 260 +++++++++ .../server.rpt | 267 +++++++++ .../client.rpt | 51 ++ .../server.rpt | 53 ++ .../session.cancel.session.expiry/client.rpt | 66 +++ .../session.cancel.session.expiry/server.rpt | 72 +++ .../session.client.sent.reset/client.rpt | 40 +- .../session.client.sent.reset/server.rpt | 33 ++ .../kafka/session.client.takeover/client.rpt | 79 ++- .../kafka/session.client.takeover/server.rpt | 80 ++- .../client.rpt | 260 +++++++++ .../server.rpt | 267 +++++++++ .../client.rpt | 128 +++++ .../server.rpt | 122 ++++ .../client.rpt | 128 +++++ .../server.rpt | 122 ++++ .../session.exists.clean.start/client.rpt | 78 ++- .../session.exists.clean.start/server.rpt | 80 ++- .../client.rpt | 25 +- .../server.rpt | 13 + .../streams/kafka/session.redirect/client.rpt | 82 +-- .../streams/kafka/session.redirect/server.rpt | 39 +- .../session.server.sent.reset/client.rpt | 42 +- .../session.server.sent.reset/server.rpt | 33 ++ .../client.rpt | 32 ++ .../server.rpt | 33 ++ .../kafka/session.subscribe/client.rpt | 46 +- .../kafka/session.subscribe/server.rpt 
| 33 ++ .../client.rpt | 32 ++ .../server.rpt | 33 ++ .../client.rpt | 32 ++ .../server.rpt | 33 ++ .../client.rpt | 127 ++-- .../server.rpt | 128 +++-- .../client.rpt | 127 ++-- .../server.rpt | 129 +++-- .../client.rpt | 23 +- .../server.rpt | 23 +- .../client.rpt | 71 ++- .../server.rpt | 72 ++- .../client.rpt | 109 +++- .../server.rpt | 110 +++- .../client.rpt | 107 ++-- .../server.rpt | 108 ++-- .../client.rpt | 127 ++-- .../server.rpt | 128 +++-- .../client.rpt | 6 + .../server.rpt | 6 + .../client.rpt | 6 + .../server.rpt | 6 + .../client.rpt | 6 + .../server.rpt | 6 + .../client.rpt | 35 ++ .../server.rpt | 36 ++ .../client.rpt | 35 ++ .../server.rpt | 36 ++ .../client.rpt | 38 ++ .../server.rpt | 41 ++ .../client.rpt | 38 ++ .../server.rpt | 41 ++ .../streams/mqtt/session.redirect/client.rpt | 5 +- .../streams/mqtt/session.redirect/server.rpt | 5 +- .../client.rpt | 0 .../server.rpt | 0 .../binding/mqtt/kafka/streams/KafkaIT.java | 58 +- .../binding/mqtt/kafka/streams/MqttIT.java | 46 +- .../internal/MqttKafkaConfiguration.java | 14 + .../config/MqttKafkaBindingConfig.java | 2 +- .../stream/MqttKafkaSessionFactory.java | 542 ++++++++++++------ .../internal/MqttKafkaConfigurationTest.java | 6 + .../stream/MqttKafkaSessionProxyIT.java | 172 +++--- .../binding/mqtt/internal/MqttFunctions.java | 165 ++++-- .../main/resources/META-INF/zilla/mqtt.idl | 22 +- .../client.rpt | 39 ++ .../server.rpt | 42 ++ .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 47 ++ .../server.rpt | 48 ++ .../client.rpt | 39 ++ .../server.rpt | 39 ++ .../client.rpt | 32 +- .../server.rpt | 32 +- .../client.rpt | 24 +- .../server.rpt | 24 +- .../mqtt/internal/MqttFunctionsTest.java | 114 ++-- .../mqtt/streams/application/SessionIT.java | 9 + .../mqtt/streams/network/ConnectionIT.java | 9 + .../mqtt/streams/network/SessionIT.java | 9 + .../mqtt/internal/MqttConfiguration.java | 7 - .../internal/stream/MqttServerFactory.java | 115 +++- 
.../mqtt/internal/MqttConfigurationTest.java | 3 - .../mqtt/internal/stream/ConnectionIT.java | 93 +-- .../binding/mqtt/internal/stream/PingIT.java | 6 - .../mqtt/internal/stream/PublishIT.java | 57 -- .../mqtt/internal/stream/SessionIT.java | 52 +- .../mqtt/internal/stream/SubscribeIT.java | 59 -- .../mqtt/internal/stream/UnsubscribeIT.java | 18 - 100 files changed, 5223 insertions(+), 1166 deletions(-) create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt create mode 
100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt rename incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/{session.will.message.will.id.mismatch.no.deliver => session.will.message.will.id.mismatch.skip.delivery}/client.rpt (76%) rename incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/{session.will.message.will.id.mismatch.no.deliver => session.will.message.will.id.mismatch.skip.delivery}/server.rpt (75%) create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt create mode 100644 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt rename incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/{session.will.message.client.takeover.deliver.will => session.will.message.takeover.deliver.will}/client.rpt (100%) rename incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/{session.will.message.client.takeover.deliver.will => session.will.message.takeover.deliver.will}/server.rpt (100%) create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt new file mode 100644 index 0000000000..917160940c 
--- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt @@ -0,0 +1,260 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .filter() + .header("type", "expiry-signal") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .header("type", 
"expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} + +read notify RECEIVED_WILL_DELIVER_AT_SIGNAL + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .hashKey("client-1") + .build() + .build()} +write flush + + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_INITIAL_MIGRATE_SIGNAL + +write close +read closed + + +connect await SENT_INITIAL_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +read zilla:data.null +read notify RECEIVED_GROUP_MEMBERS_LEADER + +write abort + + +connect await RECEIVED_GROUP_MEMBERS_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") 
+ .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + +# no will signals +# no session state +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} +write flush + +write abort +read aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt new file mode 100644 index 0000000000..13916ca7e9 --- 
/dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt @@ -0,0 +1,267 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .filter() + .header("type", "expiry-signal") + .build() + .build() + .build()} + +connected +read notify SIGNAL_STREAM_STARTED + +write await RECEIVED_EXPIRY_CANCELLATION_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write await RECEIVED_EXPIRE_LATER_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + +# expiry signal for client-1, expire at 
(now + delay) +write await RECEIVED_EXPIRE_AT_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} +write flush + +# cleanup session state +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .hashKey("client-1") + .build() + .build()} +read zilla:data.null + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected +# receive sender-1 migrate signal + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +# send group members (leader) +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +write flush + +read aborted + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + 
.headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null +read notify RECEIVED_WILL_CANCELLATION_SIGNAL + + +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null +read notify RECEIVED_EXPIRY_CANCELLATION_SIGNAL + +# expiry signal for client-1, deliver later +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + +write notify RECEIVED_EXPIRE_LATER_SIGNAL + +# no session state +# no migrate signals +write advise zilla:flush + +# expiry signal for client-1, expire at (now + delay) +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} + +write notify RECEIVED_EXPIRE_AT_SIGNAL + +read aborted +write abort diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt index d93c7548e5..aa4d4e79e6 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt @@ -114,6 +114,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() @@ -142,6 +174,25 @@ read ${mqtt:session() .subscription("sensor/one", 1) .build()} +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} + write abort read aborted diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt index 47e979d3ba..35ab33205b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt @@ -109,6 +109,39 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.null +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() @@ -137,6 +170,26 @@ write ${mqtt:session() .build()} write flush +# session expireAt signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + 
.clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} + read aborted write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt new file mode 100644 index 0000000000..4250a2f898 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt @@ -0,0 +1,66 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .filter() + .header("type", "expiry-signal") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(2000) + .expireAt(expireAt) + .build() + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt new file mode 100644 index 0000000000..7dd3cc0d8a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt @@ -0,0 +1,72 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +property delayMillis 2000L +property expireAt ${mqtt:timestamp() + delayMillis} + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .filter() + .header("type", "expiry-signal") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .header("type", "will-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(2000) + .expireAt(expireAt) + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt index 090f55d3d1..4282c55601 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt @@ -44,13 +44,13 @@ write zilla:data.ext ${kafka:dataEx() .build()} write zilla:data.empty write flush +write notify SENT_INIT_MIGRATE write close read closed -write notify INIT_MIGRATE_FINISHED -connect await INIT_MIGRATE_FINISHED +connect await SENT_INIT_MIGRATE "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -74,12 +74,12 @@ read zilla:data.ext ${kafka:matchDataEx() .members(1) .build() .build()} +read notify RECEIVED_LEADER_DATA read abort -write notify GROUP_FINISHED -connect await GROUP_FINISHED +connect await RECEIVED_LEADER_DATA "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -114,6 +114,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + read advised zilla:flush read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt index d7e156df05..7107c3c822 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt @@ -108,6 +108,39 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.null +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + write advise zilla:flush write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt index 6d3087baf0..0112b51e6a 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt @@ -127,6 
+127,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() @@ -190,12 +222,14 @@ write zilla:data.ext ${kafka:dataEx() .hashKey("client-1") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(1000) - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(1000) + .build() + .build()} write flush write close @@ -355,6 +389,39 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) .merged() diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt index 577dacf53d..6398bd4f6d 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt @@ -128,6 +128,39 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.null +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() @@ -192,12 +225,14 @@ read zilla:data.ext ${kafka:matchDataEx() .hashKey("client-1") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(1000) - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(1000) + .build() + .build()} read closed write close @@ -347,6 +382,39 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.null 
+# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt new file mode 100644 index 0000000000..b4c4714a4c --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt @@ -0,0 +1,260 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .filter() + .header("type", "expiry-signal") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} + +read notify RECEIVED_WILL_DELIVER_AT_SIGNAL + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .hashKey("client-1") + .build() + .build()} +write flush + + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", 
"sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush +write notify SENT_INITIAL_MIGRATE_SIGNAL + +write close +read closed + + +connect await SENT_INITIAL_MIGRATE_SIGNAL + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +read zilla:data.null +read notify RECEIVED_GROUP_MEMBERS_LEADER + +write close + + +connect await RECEIVED_GROUP_MEMBERS_LEADER + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write 
zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + +# no will signals +# no session state +read advised zilla:flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} +write flush + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt new file mode 100644 index 0000000000..41203a3ab6 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt @@ -0,0 +1,267 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .filter() + .header("type", "expiry-signal") + .build() + .build() + .build()} + +connected +read notify SIGNAL_STREAM_STARTED + +write await RECEIVED_EXPIRY_CANCELLATION_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write await RECEIVED_EXPIRE_LATER_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + +# expiry signal for client-1, expire at (now + delay) +write await RECEIVED_EXPIRE_AT_SIGNAL +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} +write flush + +# cleanup session state +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1") + .hashKey("client-1") + .build() + .build()} +read zilla:data.null + + +accepted + 
+read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected +# receive sender-1 migrate signal + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(1000) + .build() + .build()} + +connected + +# send group members (leader) +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("member-1") + .memberId("member-1") + .members(1) + .build() + .build()} +write flush + +read closed + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null +read notify RECEIVED_WILL_CANCELLATION_SIGNAL + + +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + 
.header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null +read notify RECEIVED_EXPIRY_CANCELLATION_SIGNAL + +# expiry signal for client-1, deliver later +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + +write notify RECEIVED_EXPIRE_LATER_SIGNAL + +# no session state +# no migrate signals +write advise zilla:flush + +# expiry signal for client-1, expire at (now + delay) +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} + +write notify RECEIVED_EXPIRE_AT_SIGNAL + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt new file mode 100644 index 0000000000..4207bee339 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt @@ -0,0 +1,128 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write close +read closed + +write notify INIT_MIGRATE_CLOSED + +connect await INIT_MIGRATE_CLOSED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(100000) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write advise zilla:flush + + +connect await INIT_MIGRATE_CLOSED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() 
+ .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt new file mode 100644 index 0000000000..3c44ddd9c4 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt @@ -0,0 +1,122 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(100000) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} +write flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", 
"expiry-signal") + .build() + .build()} +read zilla:data.null + +write advise zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt new file mode 100644 index 0000000000..db1551265a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt @@ -0,0 +1,128 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +write zilla:data.empty +write flush + +write close +read closed + +write notify INIT_MIGRATE_CLOSED + +connect await INIT_MIGRATE_CLOSED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(0) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} + +write advise zilla:flush + + +connect await INIT_MIGRATE_CLOSED + "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + 
.typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt new file mode 100644 index 0000000000..ce6507d359 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt @@ -0,0 +1,122 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#migrate") + .hashKey("client-1") + .header("sender-id", "sender-1") + .build() + .build()} +read zilla:data.empty + +read closed +write close + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(0) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("consumer-1") + .memberId("consumer-1") + .members(1) + .build() + .build()} +write flush + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt_sessions") + .groupId("mqtt-clients") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() + .build() + .build()} + +connected + +# will delivery cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#will-signal") + .hashKey("client-1") + .header("type", "will-signal") + .build() + .build()} +read zilla:data.null + +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", 
"expiry-signal") + .build() + .build()} +read zilla:data.null + +write advise zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt index b891f89fe2..a1516d4ee6 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt @@ -125,6 +125,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() @@ -190,12 +222,14 @@ write zilla:data.ext ${kafka:dataEx() .hashKey("client-1") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(1000) - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(1000) + .build() + .build()} write flush write close @@ -338,6 +372,38 @@ write zilla:begin.ext ${kafka:beginEx() connected +write zilla:data.ext 
${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) .merged() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt index 7c2cf912f4..5253c8181c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt @@ -126,6 +126,39 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.null +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + 
.build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() @@ -193,12 +226,14 @@ read zilla:data.ext ${kafka:matchDataEx() .hashKey("client-1") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(1000) - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(1000) + .build() + .build()} read closed write close @@ -336,6 +371,39 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt index 702b7d22dc..70ddc96dcb 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt @@ -44,13 +44,13 @@ write zilla:data.ext ${kafka:dataEx() .build()} write zilla:data.empty write flush +write notify SENT_INIT_MIGRATE write close read closed -write notify INIT_MIGRATE_FINISHED -connect await INIT_MIGRATE_FINISHED +connect await SENT_INIT_MIGRATE "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -74,13 +74,10 @@ read zilla:data.ext ${kafka:matchDataEx() .members(1) .build() .build()} +read notify RECEIVED_LEADER_DATA +read notify CONNACK_TRIGGERED -write notify CONNACK_TRIGGERED - - -write notify GROUP_FINISHED - -connect await GROUP_FINISHED +connect await RECEIVED_LEADER_DATA "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -115,5 +112,17 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt index 373d5ab9af..d551d404f6 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt @@ -111,6 +111,19 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.null +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + write advise zilla:flush write notify CONNACK_TRIGGERED diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt index fa6dac76f0..ef73e84569 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt @@ -23,6 +23,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") .groupId("mqtt-clients") + .consumerId("mqtt-1.example.com:1883") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -44,13 +45,13 @@ write zilla:data.ext ${kafka:dataEx() .build()} write zilla:data.empty write flush +write notify SENT_INIT_MIGRATE write close read closed -write notify INIT_MIGRATE_FINISHED -connect await INIT_MIGRATE_FINISHED +connect await SENT_INIT_MIGRATE "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -74,10 +75,9 @@ read zilla:data.ext ${kafka:matchDataEx() .members(1) .build() .build()} +read notify RECEIVED_LEADER_DATA -write notify GROUP_FINISHED - -connect await 
GROUP_FINISHED +connect await RECEIVED_LEADER_DATA "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -88,7 +88,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") .groupId("mqtt-clients") - .consumerId("localhost:1883") + .consumerId("mqtt-1.example.com:1883") .filter() .key("client-1") .build() @@ -113,70 +113,42 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush -read advised zilla:flush - write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() .deferred(0) .partition(-1, -1) - .key("client-1") + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") .build() .build()} -write ${mqtt:session() - .subscription("sensor/one", 1) - .build()} write flush -read zilla:data.ext ${kafka:matchDataEx() +# session expire later signal for client-1 +write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() .deferred(0) .partition(-1, -1) - .key("client-1") + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") .build() .build()} -read ${mqtt:session() - .subscription("sensor/one", 1) +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() .build()} +write flush -write notify SESSION_STATE_FINISHED - -connect await SESSION_STATE_FINISHED - "zilla://streams/kafka0" - option zilla:window 8192 - option zilla:transmission "duplex" - -write zilla:begin.ext ${kafka:beginEx() - .typeId(zilla:id("kafka")) - .merged() - .capabilities("FETCH_ONLY") - .topic("mqtt_messages") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") - .build() - .build() - .evaluation("EAGER") - .build() - .build()} - -connected - -read zilla:data.ext ${kafka:matchDataEx() - .typeId(zilla:id("kafka")) - .merged() - .filters(1) - .partition(0, 1, 2) - .progress(0, 2) - .progress(1, 1) - .key("sensor/one") - 
.header("zilla:filter", "sensor") - .header("zilla:filter", "one") - .header("zilla:local", "client") - .header("zilla:format", "TEXT") - .build() - .build()} -read "message" +read zilla:reset.ext ${kafka:resetEx() + .typeId(zilla:id("kafka")) + .consumerId("mqtt-2.example.com:1883") + .build()} +write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt index 0963cda94a..e5c4bb2b04 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") .groupId("mqtt-clients") - .consumerId("localhost:1883") + .consumerId("mqtt-1.example.com:1883") .filter() .key("client-1#migrate") .headerNot("sender-id", "sender-1") @@ -84,7 +84,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .capabilities("PRODUCE_AND_FETCH") .topic("mqtt_sessions") .groupId("mqtt-clients") - .consumerId("localhost:1883") + .consumerId("mqtt-1.example.com:1883") .filter() .key("client-1") .build() @@ -110,8 +110,41 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.null +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + 
.deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + write zilla:reset.ext ${kafka:resetEx() .typeId(zilla:id("kafka")) - .consumerId("localhost:1884") + .consumerId("mqtt-2.example.com:1883") .build()} read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt index 010b8cde52..f85f4f3de5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt @@ -44,13 +44,12 @@ write zilla:data.ext ${kafka:dataEx() .build()} write zilla:data.empty write flush +write notify SENT_INIT_MIGRATE write close read closed -write notify INIT_MIGRATE_FINISHED - -connect await INIT_MIGRATE_FINISHED +connect await SENT_INIT_MIGRATE "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -74,10 +73,9 @@ read zilla:data.ext ${kafka:matchDataEx() .members(1) .build() .build()} +read notify RECEIVED_LEADER_DATA -write notify GROUP_FINISHED - -connect await GROUP_FINISHED +connect await RECEIVED_LEADER_DATA "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -112,6 +110,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + 
.hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + read advised zilla:flush write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt index f6b3814dd7..e1660a080a 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt @@ -107,6 +107,39 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.null +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + write 
advise zilla:flush read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt index 5e03daea66..1e01bc5a4a 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt @@ -110,6 +110,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + read advised zilla:flush read zilla:data.ext ${kafka:matchDataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt index 4c99172db9..a6dcf27f75 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt @@ -103,6 +103,39 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.null +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + write advise zilla:flush write zilla:data.ext ${kafka:dataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt index 45bdf2143f..aec532f06b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt @@ -44,13 +44,13 @@ write zilla:data.ext ${kafka:dataEx() .build()} write zilla:data.empty write flush +write notify SENT_INIT_MIGRATE write close read closed -write notify 
INIT_MIGRATE_FINISHED -connect await INIT_MIGRATE_FINISHED +connect await SENT_INIT_MIGRATE "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -74,10 +74,9 @@ read zilla:data.ext ${kafka:matchDataEx() .members(1) .build() .build()} +read notify RECEIVED_LEADER_DATA -write notify GROUP_FINISHED - -connect await GROUP_FINISHED +connect await RECEIVED_LEADER_DATA "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -112,6 +111,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() @@ -139,10 +170,9 @@ read zilla:data.ext ${kafka:matchDataEx() read ${mqtt:session() .subscription("sensor/one", 1) .build()} +read notify RECEIVED_SESSION_STATE -write notify SESSION_STATE_FINISHED - -connect await SESSION_STATE_FINISHED +connect await RECEIVED_SESSION_STATE "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt index f9cdaba867..2d4d9e6aac 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt @@ -108,6 +108,39 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.null +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt index 7d254eec73..5054bd4156 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt @@ -109,6 +109,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + 
.typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt index 4e5eb2990b..ec14a6fac1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt @@ -103,6 +103,39 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.null +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + 
.build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt index 030545d654..0b066697ae 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt @@ -109,6 +109,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + read advised zilla:flush write zilla:data.ext ${kafka:dataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt index 2463ccf23c..576e6e0d23 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt @@ -103,6 +103,39 @@ read zilla:data.ext ${kafka:matchDataEx() .build()} read zilla:data.null +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + write advise zilla:flush read zilla:data.ext ${kafka:matchDataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt index 404416a76b..7fd4b3fb14 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt @@ -26,6 +26,9 @@ write zilla:begin.ext ${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -52,14 +55,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) @@ -70,14 +75,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} read notify RECEIVED_WILL_DELIVER_AT_SIGNAL write zilla:data.ext ${kafka:dataEx() @@ -234,6 +241,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + 
+write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() @@ -264,14 +303,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush write advise zilla:flush ${kafka:flushEx() @@ -301,14 +342,36 @@ write zilla:data.ext ${kafka:dataEx() .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + 
.delay(1000) + .expireAt(2000) + .build() + .build()} write flush write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt index 0680352986..08da729d9f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt @@ -29,6 +29,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -58,14 +61,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush # will signal for client-1, deliver at (now + delay) @@ -79,14 +84,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write 
${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush # cleanup will message @@ -234,6 +241,39 @@ read zilla:data.ext ${kafka:matchDataEx() read zilla:data.null read notify RECEIVED_WILL_CANCELLATION_SIGNAL +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + # will message for client-1 read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) @@ -265,14 +305,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL @@ -304,17 +346,39 @@ read zilla:data.ext ${kafka:matchDataEx() .hashKey("client-1") .build() .build()} -read ${mqtt:willSignal() 
- .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write notify RECEIVED_WILL_DELIVER_AT_SIGNAL +# session expireAt signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} + read aborted write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt index 2435e95f5f..3ef943399b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt @@ -26,6 +26,9 @@ write zilla:begin.ext ${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -52,14 +55,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - 
.delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) @@ -70,14 +75,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} read notify RECEIVED_WILL_DELIVER_AT_SIGNAL @@ -234,6 +241,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() @@ -266,14 +305,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") 
.build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush write advise zilla:flush ${kafka:flushEx() @@ -305,14 +346,36 @@ write zilla:data.ext ${kafka:dataEx() .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} write flush write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt index 3c650329ee..8f7ed45fc1 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt @@ -29,6 +29,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -58,14 +61,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush # will signal for client-1, deliver at (now + delay) @@ -79,14 +84,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush # cleanup will message @@ -234,6 +241,40 @@ read zilla:data.ext ${kafka:matchDataEx() read zilla:data.null read notify RECEIVED_WILL_CANCELLATION_SIGNAL + +# session expiry cancellation signal for client-1 +read zilla:data.ext 
${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + # will message for client-1 read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) @@ -267,14 +308,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL @@ -306,17 +349,39 @@ read zilla:data.ext ${kafka:matchDataEx() .hashKey("client-1") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write notify 
RECEIVED_WILL_DELIVER_AT_SIGNAL +# session expireAt signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} + read aborted write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt index 2b23b0a6fe..df38527087 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt @@ -26,6 +26,9 @@ write zilla:begin.ext ${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -40,16 +43,18 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(deliverAt) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(deliverAt) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} read notify RECEIVED_WILL_DELIVER_AT_SIGNAL -read 
notify WAIT_1_SECOND +read notify ONE_SECOND_ELAPSED read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt index abdb9b95eb..6771dc374a 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt @@ -32,6 +32,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -46,17 +49,19 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(deliverAt) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(deliverAt) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush -write await WAIT_1_SECOND +write await ONE_SECOND_ELAPSED write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt index 3c8a37b63a..6f8757c5e5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt @@ -26,6 +26,9 @@ write zilla:begin.ext ${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -40,14 +43,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(-1) - .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} connect "zilla://streams/kafka0" @@ -141,6 +146,38 @@ write zilla:begin.ext ${kafka:beginEx() connected +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write 
flush + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() @@ -170,14 +207,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush write advise zilla:flush ${kafka:flushEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt index 28825c38d5..840033a4a6 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt @@ -29,6 +29,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -45,14 +48,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(-1) - .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + 
.delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush @@ -138,6 +143,39 @@ connected # no will cancellation signal due to clean-start +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + # will message for client-1 read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) @@ -168,14 +206,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt index 9f8bd8aab2..e315decae9 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt @@ -26,6 +26,9 @@ write zilla:begin.ext ${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -52,14 +55,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(-1) - .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) @@ -100,14 +105,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(-1) - .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") - .willId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build()} read notify RECEIVED_WILL_SIGNAL read advised zilla:flush @@ -221,6 
+228,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() @@ -250,14 +289,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(-1) - .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush write advise zilla:flush ${kafka:flushEx() @@ -299,5 +340,25 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} +write flush + write close read closed diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt index 4e75d563cd..c20fe09fdc 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt @@ -28,6 +28,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -57,14 +60,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(-1) - .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush write await RECEIVED_WILL_SIGNAL_CLEANUP @@ -107,14 +112,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(-1) - .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") - .willId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(-1) + 
.lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .build() + .build()} write flush write advise zilla:flush @@ -218,6 +225,39 @@ read zilla:data.ext ${kafka:matchDataEx() read zilla:data.null read notify RECEIVED_WILL_CANCELLATION_SIGNAL +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + # will message for client-1 read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) @@ -248,14 +288,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(0) - .deliverAt(-1) - .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(0) + .deliverAt(-1) + .lifetimeId("7ce005a0-ce9d-444d-b14b-2f302d13799d") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} read notify RECEIVED_WILL_DELIVER_LATER_SIGNAL read advised zilla:flush ${kafka:matchFlushEx() @@ -303,5 +345,25 @@ read zilla:data.null write notify RECEIVED_WILL_SIGNAL_CLEANUP +# session expireAt signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + 
.merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} + read closed write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt index 86ed617384..5af55920f7 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt @@ -26,6 +26,9 @@ write zilla:begin.ext ${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -52,14 +55,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) @@ -70,14 +75,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read 
${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} read notify RECEIVED_WILL_DELIVER_AT_SIGNAL write zilla:data.ext ${kafka:dataEx() @@ -242,6 +249,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() @@ -271,14 +310,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush write advise zilla:flush ${kafka:flushEx() @@ -320,14 +361,16 @@ 
write zilla:data.ext ${kafka:dataEx() .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt index b9da4c6e7e..4b30550610 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt @@ -29,6 +29,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -58,14 +61,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + 
.willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush # will signal for client-1, deliver at (now + delay) @@ -79,14 +84,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush # cleanup will message @@ -245,6 +252,39 @@ read zilla:data.ext ${kafka:matchDataEx() read zilla:data.null read notify RECEIVED_WILL_CANCELLATION_SIGNAL +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + # will message for client-1 read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) @@ -275,14 +315,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - 
.willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL @@ -327,14 +369,16 @@ read zilla:data.ext ${kafka:matchDataEx() .hashKey("client-1") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} read notify RECEIVED_WILL_DELIVER_AT_SIGNAL read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt similarity index 76% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/client.rpt rename to incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt index 64e32ca7c3..715dba25b0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/client.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt @@ -26,6 +26,9 @@ write zilla:begin.ext ${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -52,14 +55,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) @@ -70,14 +75,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} read notify RECEIVED_WILL_DELIVER_AT_SIGNAL @@ -210,6 +217,38 @@ write zilla:data.ext ${kafka:dataEx() .build()} write flush +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush + +write zilla:data.ext 
${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} +write flush + write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() @@ -242,14 +281,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush write advise zilla:flush ${kafka:flushEx() @@ -279,14 +320,36 @@ write zilla:data.ext ${kafka:dataEx() .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + 
.expireAt(2000) + .build() + .build()} write flush write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt similarity index 75% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/server.rpt rename to incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt index 901b37f204..20f972bcb5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.no.deliver/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt @@ -29,6 +29,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -58,14 +61,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush # will signal for client-1, 
different willId @@ -79,14 +84,16 @@ write zilla:data.ext ${kafka:dataEx() .header("type", "will-signal") .build() .build()} -write ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +write ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write flush @@ -207,6 +214,39 @@ read zilla:data.ext ${kafka:matchDataEx() read zilla:data.null read notify RECEIVED_WILL_CANCELLATION_SIGNAL +# session expiry cancellation signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + +# session expire later signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(-1) + .build() + .build()} + # will message for client-1 read zilla:data.ext ${kafka:matchDataEx() .typeId(zilla:id("kafka")) @@ -240,14 +280,16 @@ read zilla:data.ext ${kafka:matchDataEx() .header("type", "will-signal") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(-1) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + 
.delay(1000) + .deliverAt(-1) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL @@ -279,17 +321,39 @@ read zilla:data.ext ${kafka:matchDataEx() .hashKey("client-1") .build() .build()} -read ${mqtt:willSignal() - .clientId("client-1") - .delay(1000) - .deliverAt(2000) - .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") - .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") - .instanceId("zilla-1") - .build()} +read ${mqtt:sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .deliverAt(2000) + .lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1") + .willId("d252a6bd-abb5-446a-b0f7-d0a3d8c012e2") + .build() + .build()} write notify RECEIVED_WILL_DELIVER_AT_SIGNAL +# session expireAt signal for client-1 +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(1000) + .expireAt(2000) + .build() + .build()} + read aborted write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt index 104337d267..682eb828e6 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt @@ -26,6 +26,9 @@ write zilla:begin.ext 
${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -49,6 +52,9 @@ write zilla:begin.ext ${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt index 5e8609f387..e7fe1ca308 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt @@ -30,6 +30,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -49,6 +52,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt index 2b4698bafc..37157d06e7 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt @@ -26,6 +26,9 @@ write zilla:begin.ext ${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -49,6 +52,9 @@ write zilla:begin.ext ${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt index 3bfdcf16c4..144a12764f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt @@ -30,6 +30,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -49,6 +52,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt index f2c9e2f9d1..fd91c8e9e1 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt @@ -26,6 +26,9 @@ write zilla:begin.ext ${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -48,6 +51,9 @@ write zilla:begin.ext ${kafka:beginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt index d637b63167..eb15a3f2e1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt @@ -30,6 +30,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} @@ -48,6 +51,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .filter() .header("type", "will-signal") .build() + .filter() + .header("type", "expiry-signal") + .build() .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt new file mode 100644 index 0000000000..8df5293153 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt @@ -0,0 +1,35 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +connect await SIGNAL_STREAM_STARTED + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + + +read zilla:data.empty + +write abort +read aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt new file mode 100644 index 0000000000..be7bfea47a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt @@ -0,0 +1,36 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may 
not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read aborted +write abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt new file mode 100644 index 0000000000..0a23695f58 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt @@ -0,0 +1,35 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect await SIGNAL_STREAM_STARTED + "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + + +read zilla:data.empty + +write close +read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt new file mode 100644 index 0000000000..58940fdcd8 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt @@ -0,0 +1,36 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(1) + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read closed +write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt new file mode 100644 index 0000000000..9b17e7cb7a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt @@ -0,0 +1,38 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(100) + .clientId("client-1") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(30) + .clientId("client-1") + .build() + .build()} + +connected + +read zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt new file mode 100644 index 0000000000..2232e77d8d --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt @@ -0,0 +1,41 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(100) + .clientId("client-1") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(30) + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.empty +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt new file mode 100644 index 0000000000..1b98a74e50 --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt @@ -0,0 +1,38 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(0) + .clientId("client-1") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(2) + .clientId("client-1") + .build() + .build()} + +connected + +read zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt new file mode 100644 index 0000000000..4916dcb1ed --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt @@ -0,0 +1,41 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +accept "zilla://streams/mqtt0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(0) + .clientId("client-1") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(2) + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.empty +write flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt index 7bcdb45192..14a46dcfcd 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${mqtt:beginEx() .session() .expiry(1) .clientId("client-1") - .serverRef("localhost:1883") + .serverRef("mqtt-1.example.com:1883") .build() .build()} @@ -30,6 +30,7 @@ connected read zilla:reset.ext ${mqtt:resetEx() .typeId(zilla:id("mqtt")) - .serverRef("localhost:1884") + .serverRef("mqtt-2.example.com:1883") .build()} + write aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt index daa2e283a9..bffc992ae2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client-1") - .serverRef("localhost:1883") + .serverRef("mqtt-1.example.com:1883") .build() .build()} @@ -31,6 +31,7 @@ connected write zilla:reset.ext ${mqtt:resetEx() .typeId(zilla:id("mqtt")) - .serverRef("localhost:1884") + .serverRef("mqtt-2.example.com:1883") .build()} + read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.client.takeover.deliver.will/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.client.takeover.deliver.will/client.rpt rename to incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.client.takeover.deliver.will/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.client.takeover.deliver.will/server.rpt rename to incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java index ff22cf0721..b683ff1426 100644 --- a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java +++ b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java @@ -575,6 +575,33 @@ public void shouldGroupStreamReceiveServerSentReset() throws Exception k3po.finish(); } + @Test + @Specification({ + "${kafka}/session.close.expire.session.state/client", + "${kafka}/session.close.expire.session.state/server"}) + public void shouldExpireSessionOnClose() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.abort.expire.session.state/client", + "${kafka}/session.abort.expire.session.state/server"}) + public void shouldExpireSessionOnAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.cancel.session.expiry/client", + "${kafka}/session.cancel.session.expiry/server"}) + public void shouldCancelSessionExpiry() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${kafka}/session.will.message.abort.deliver.will/client", @@ -613,8 +640,8 @@ public void shouldGenerateLifeTimeIdOnCleanStart() throws Exception @Test @Specification({ - "${kafka}/session.will.message.will.id.mismatch.no.deliver/client", - "${kafka}/session.will.message.will.id.mismatch.no.deliver/server"}) + "${kafka}/session.will.message.will.id.mismatch.skip.delivery/client", + "${kafka}/session.will.message.will.id.mismatch.skip.delivery/server"}) public void shouldNotSendWillMessageOnWillIdMismatch() throws Exception { k3po.finish(); @@ -664,4 +691,31 @@ public void shouldReconnectWillStreamOnKafkaReset() throws Exception { k3po.finish(); } + + @Test + @Specification({ + 
"${kafka}/session.connect.override.max.session.expiry/client", + "${kafka}/session.connect.override.max.session.expiry/server"}) + public void shouldConnectServerOverridesSessionExpiryTooBig() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.connect.override.min.session.expiry/client", + "${kafka}/session.connect.override.min.session.expiry/server"}) + public void shouldConnectServerOverridesSessionExpiryTooSmall() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${kafka}/session.redirect/client", + "${kafka}/session.redirect/server"}) + public void shouldRedirect() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java index 87a06466ac..cbd728c02b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java +++ b/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java @@ -413,6 +413,24 @@ public void shouldAcknowledgeSingleTopicFilter() throws Exception k3po.finish(); } + @Test + @Specification({ + "${mqtt}/session.connect.override.max.session.expiry/client", + "${mqtt}/session.connect.override.max.session.expiry/server"}) + public void shouldConnectServerOverridesSessionExpiryTooBig() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.connect.override.min.session.expiry/client", + "${mqtt}/session.connect.override.min.session.expiry/server"}) + public void shouldConnectServerOverridesSessionExpiryTooSmall() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${mqtt}/session.abort.reconnect.non.clean.start/client", @@ -494,6 +512,28 @@ public void shouldSessionStreamReceiveServerSentReset() throws Exception 
k3po.finish(); } + @Test + @Specification({ + "${mqtt}/session.close.expire.session.state/client", + "${mqtt}/session.close.expire.session.state/server"}) + public void shouldExpireSessionOnClose() throws Exception + { + k3po.start(); + k3po.notifyBarrier("SIGNAL_STREAM_STARTED"); + k3po.finish(); + } + + @Test + @Specification({ + "${mqtt}/session.abort.expire.session.state/client", + "${mqtt}/session.abort.expire.session.state/server"}) + public void shouldExpireSessionOnAbort() throws Exception + { + k3po.start(); + k3po.notifyBarrier("SIGNAL_STREAM_STARTED"); + k3po.finish(); + } + @Test @Specification({ "${mqtt}/session.will.message.abort.deliver.will/client", @@ -540,9 +580,9 @@ public void shouldSendWillMessageOnClientReconnectCleanStart() throws Exception @Test @Specification({ - "${mqtt}/session.will.message.client.takeover.deliver.will/client", - "${mqtt}/session.will.message.client.takeover.deliver.will/server"}) - public void shouldSendWillMessageOnAbortClientTakeover() throws Exception + "${mqtt}/session.will.message.takeover.deliver.will/client", + "${mqtt}/session.will.message.takeover.deliver.will/server"}) + public void shouldDeliverWillMessageOnSessionTakeover() throws Exception { k3po.start(); k3po.notifyBarrier("WILL_STREAM_STARTED"); diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java index f04b2a3b27..0e33fc3df3 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java @@ -42,6 +42,8 @@ public class MqttKafkaConfiguration extends Configuration public static final PropertyDef TIME; public static final BooleanPropertyDef 
WILL_AVAILABLE; public static final IntPropertyDef WILL_STREAM_RECONNECT_DELAY; + public static final IntPropertyDef SESSION_EXPIRY_INTERVAL_MAX; + public static final IntPropertyDef SESSION_EXPIRY_INTERVAL_MIN; static { @@ -61,6 +63,8 @@ public class MqttKafkaConfiguration extends Configuration MqttKafkaConfiguration::decodeLongSupplier, MqttKafkaConfiguration::defaultTime); WILL_AVAILABLE = config.property("will.available", true); WILL_STREAM_RECONNECT_DELAY = config.property("will.stream.reconnect", 2); + SESSION_EXPIRY_INTERVAL_MAX = config.property("session.expiry.interval.max", 30000); + SESSION_EXPIRY_INTERVAL_MIN = config.property("session.expiry.interval.min", 1000); MQTT_KAFKA_CONFIG = config; } @@ -110,6 +114,16 @@ public int willStreamReconnectDelay() return WILL_STREAM_RECONNECT_DELAY.getAsInt(this); } + public int sessionExpiryIntervalMax() + { + return SESSION_EXPIRY_INTERVAL_MAX.get(this); + } + + public int sessionExpiryIntervalMin() + { + return SESSION_EXPIRY_INTERVAL_MIN.get(this); + } + private static StringSupplier decodeStringSupplier( String fullyQualifiedMethodName) { diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java index 165d247fec..4cce257eeb 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java @@ -31,7 +31,7 @@ public class MqttKafkaBindingConfig public final MqttKafkaOptionsConfig options; public final List routes; - public MqttKafkaSessionFactory.KafkaWillProxy willProxy; + public MqttKafkaSessionFactory.KafkaSignalStream willProxy; public MqttKafkaBindingConfig( BindingConfig 
binding) diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java index 9daec953b9..6de885d22c 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java @@ -14,11 +14,9 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream; -import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.MQTT_CLIENTS_GROUP_ID; import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT; import static io.aklivity.zilla.runtime.engine.concurrent.Signaler.NO_CANCEL_ID; import static java.lang.System.currentTimeMillis; -import static java.time.Instant.now; import static java.util.concurrent.TimeUnit.SECONDS; import static org.agrona.BitUtil.SIZE_OF_INT; import static org.agrona.BitUtil.SIZE_OF_LONG; @@ -27,6 +25,7 @@ import java.nio.charset.StandardCharsets; import java.util.Optional; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.LongFunction; import java.util.function.LongSupplier; import java.util.function.LongUnaryOperator; @@ -34,9 +33,11 @@ import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; +import org.agrona.collections.Int2ObjectHashMap; import org.agrona.collections.IntHashSet; import org.agrona.collections.Long2ObjectHashMap; import org.agrona.collections.LongArrayList; +import org.agrona.collections.Object2LongHashMap; import org.agrona.collections.Object2ObjectHashMap; import org.agrona.concurrent.UnsafeBuffer; @@ -54,12 +55,14 @@ import 
io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaKeyFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaOffsetFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.KafkaOffsetType; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttExpirySignalFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPayloadFormat; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPayloadFormatFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttPublishFlags; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttSessionFlags; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttSessionSignalFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttSessionStateFW; -import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttWillDeliverAt; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttTime; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttWillMessageFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.MqttWillSignalFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.OctetsFW; @@ -100,16 +103,25 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private static final String MQTT_TYPE_NAME = "mqtt"; private static final String MIGRATE_KEY_POSTFIX = "#migrate"; private static final String WILL_SIGNAL_KEY_POSTFIX = "#will-signal"; + private static final String EXPIRY_SIGNAL_KEY_POSTFIX = "#expiry-signal"; private static final String WILL_KEY_POSTFIX = "#will-"; private static final String GROUP_PROTOCOL = "highlander"; private static final String16FW SENDER_ID_NAME = new String16FW("sender-id"); private static final String16FW TYPE_HEADER_NAME = new String16FW("type"); + private static final OctetsFW TYPE_HEADER_NAME_OCTETS = + new OctetsFW().wrap(TYPE_HEADER_NAME.value(), 0, 
TYPE_HEADER_NAME.length()); private static final String16FW WILL_SIGNAL_NAME = new String16FW("will-signal"); + private static final OctetsFW WILL_SIGNAL_NAME_OCTETS = + new OctetsFW().wrap(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length()); + private static final String16FW EXPIRY_SIGNAL_NAME = new String16FW("expiry-signal"); + private static final OctetsFW EXPIRY_SIGNAL_NAME_OCTETS = + new OctetsFW().wrap(EXPIRY_SIGNAL_NAME.value(), 0, EXPIRY_SIGNAL_NAME.length()); private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0); private static final int DATA_FLAG_COMPLETE = 0x03; public static final String MQTT_CLIENTS_GROUP_ID = "mqtt-clients"; private static final int SIGNAL_DELIVER_WILL_MESSAGE = 1; private static final int SIGNAL_CONNECT_WILL_STREAM = 2; + private static final int SIGNAL_EXPIRE_SESSION = 3; private static final int SIZE_OF_UUID = 38; private final BeginFW beginRO = new BeginFW(); @@ -124,7 +136,7 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final AbortFW.Builder abortRW = new AbortFW.Builder(); private final FlushFW.Builder flushRW = new FlushFW.Builder(); private final MqttWillMessageFW.Builder mqttMessageRW = new MqttWillMessageFW.Builder(); - private final MqttWillSignalFW.Builder mqttWillSignalRW = new MqttWillSignalFW.Builder(); + private final MqttSessionSignalFW.Builder mqttSessionSignalRW = new MqttSessionSignalFW.Builder(); private final Array32FW.Builder kafkaHeadersRW = new Array32FW.Builder<>(new KafkaHeaderFW.Builder(), new KafkaHeaderFW()); @@ -138,7 +150,7 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final ExtensionFW extensionRO = new ExtensionFW(); private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); private final MqttSessionStateFW mqttSessionStateRO = new MqttSessionStateFW(); - private final MqttWillSignalFW mqttWillSignalRO = new MqttWillSignalFW(); + private final MqttSessionSignalFW 
mqttSessionSignalRO = new MqttSessionSignalFW(); private final MqttWillMessageFW mqttWillRO = new MqttWillMessageFW(); private final MqttDataExFW mqttDataExRO = new MqttDataExFW(); private final MqttResetExFW.Builder mqttResetExRW = new MqttResetExFW.Builder(); @@ -148,6 +160,7 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder(); + private final MqttBeginExFW.Builder mqttSessionBeginExRW = new MqttBeginExFW.Builder(); private final String16FW binaryFormat = new String16FW(MqttPayloadFormat.BINARY.name()); private final String16FW textFormat = new String16FW(MqttPayloadFormat.TEXT.name()); @@ -155,9 +168,10 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final MutableDirectBuffer extBuffer; private final MutableDirectBuffer kafkaHeadersBuffer; private final MutableDirectBuffer willMessageBuffer; - private final MutableDirectBuffer willSignalBuffer; + private final MutableDirectBuffer sessionSignalBuffer; private final MutableDirectBuffer willKeyBuffer; - private final MutableDirectBuffer willSignalKeyBuffer; + private final MutableDirectBuffer sessionSignalKeyBuffer; + private final MutableDirectBuffer sessionExtBuffer; private final BufferPool bufferPool; private final BindingHandler streamFactory; private final Signaler signaler; @@ -175,9 +189,14 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final int coreIndex; private final Supplier supplyTraceId; private final Object2ObjectHashMap willDeliverIds; + private final Object2LongHashMap sessionExpiryIds; private final InstanceId instanceId; private final boolean willAvailable; private final int reconnectDelay; + private final int sessionExpiryIntervalMaxMillis; + private final int 
sessionExpiryIntervalMinMillis; + private static AtomicInteger contextCounter = new AtomicInteger(0); + private int reconnectAttempt; public MqttKafkaSessionFactory( @@ -193,9 +212,10 @@ public MqttKafkaSessionFactory( this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.kafkaHeadersBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.willMessageBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); - this.willSignalBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.sessionSignalBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.willKeyBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); - this.willSignalKeyBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.sessionSignalKeyBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.sessionExtBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.bufferPool = context.bufferPool(); this.helper = new MqttKafkaHeaderHelper(); this.streamFactory = context.streamFactory(); @@ -212,8 +232,11 @@ public MqttKafkaSessionFactory( this.coreIndex = context.index(); this.willAvailable = config.willAvailable(); this.willDeliverIds = new Object2ObjectHashMap<>(); + this.sessionExpiryIds = new Object2LongHashMap<>(-1); this.instanceId = instanceId; this.reconnectDelay = reconnectDelay.getAsInt(config); + this.sessionExpiryIntervalMaxMillis = config.sessionExpiryIntervalMax(); + this.sessionExpiryIntervalMinMillis = config.sessionExpiryIntervalMin(); } @Override @@ -257,7 +280,7 @@ public void onAttached( Optional route = binding.routes.stream().findFirst(); final long routeId = route.map(mqttKafkaRouteConfig -> mqttKafkaRouteConfig.id).orElse(0L); - binding.willProxy = new KafkaWillProxy(binding.id, routeId, + binding.willProxy = new KafkaSignalStream(binding.id, routeId, binding.sessionsTopic(), binding.messagesTopic(), 
binding.retainedTopic()); binding.willProxy.doKafkaBegin(currentTimeMillis()); } @@ -288,8 +311,8 @@ private final class MqttSessionProxy private final String16FW sessionId; private final String16FW sessionsTopic; private String lifetimeId; - private KafkaSessionProxy session; - private KafkaGroupProxy group; + private KafkaSessionStream session; + private KafkaGroupStream group; private int state; private long initialSeq; @@ -306,7 +329,7 @@ private final class MqttSessionProxy private String serverRef; private int sessionExpiryMillis; private int sessionFlags; - private int willPadding; + private int sessionPadding; private String willId; private int delay; @@ -324,7 +347,7 @@ private MqttSessionProxy( this.routedId = routedId; this.initialId = initialId; this.replyId = supplyReplyId.applyAsLong(initialId); - this.session = new KafkaFetchWillSignalProxy(originId, resolvedId, this); + this.session = new KafkaFetchWillSignalStream(originId, resolvedId, this); this.sessionsTopic = sessionsTopic; this.sessionId = new String16FW(sessionIds.get(bindingId)); } @@ -393,22 +416,23 @@ private void onMqttBegin( this.clientId = new String16FW(clientId0); this.clientIdMigrate = new String16FW(clientId0 + MIGRATE_KEY_POSTFIX); - final int sessionExpiry = mqttSessionBeginEx.expiry(); - sessionExpiryMillis = mqttSessionBeginEx.expiry() == 0 ? 
Integer.MAX_VALUE : (int) SECONDS.toMillis(sessionExpiry); + sessionExpiryMillis = (int) SECONDS.toMillis(mqttSessionBeginEx.expiry()); sessionFlags = mqttSessionBeginEx.flags(); serverRef = mqttSessionBeginEx.serverRef().asString(); if (!isSetWillFlag(sessionFlags) || isSetCleanStart(sessionFlags)) { final long routedId = session.routedId; - session = new KafkaSessionSignalProxy(originId, routedId, this); + session = new KafkaSessionSignalStream(originId, routedId, this); } if (isSetWillFlag(sessionFlags)) { - final int willSignalSize = clientId.sizeof() + SIZE_OF_INT + SIZE_OF_LONG + SIZE_OF_UUID + SIZE_OF_UUID + + final int willSignalSize = 1 + clientId.sizeof() + SIZE_OF_INT + SIZE_OF_LONG + SIZE_OF_UUID + SIZE_OF_UUID + instanceId.instanceId().sizeof(); - willPadding = willSignalSize + SIZE_OF_UUID + SIZE_OF_UUID; + sessionPadding = willSignalSize + SIZE_OF_UUID + SIZE_OF_UUID; } + final int expirySignalSize = 1 + clientId.sizeof() + SIZE_OF_INT + SIZE_OF_LONG + instanceId.instanceId().sizeof(); + sessionPadding += expirySignalSize; session.doKafkaBeginIfNecessary(traceId, authorization, affinity); } @@ -464,7 +488,7 @@ private void onMqttData( .typeId(kafkaTypeId) .merged(m -> m .deferred(0) - .timestamp(now().toEpochMilli()) + .timestamp(System.currentTimeMillis()) .partition(p -> p.partitionId(-1).partitionOffset(-1)) .key(b -> b.length(key.length()) .value(key.value(), 0, key.length())) @@ -498,39 +522,41 @@ private void onMqttData( String16FW willSignalKey = new String16FW.Builder() - .wrap(willSignalKeyBuffer, 0, willSignalKeyBuffer.capacity()) + .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) .set(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); Flyweight willSignalKafkaDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) .merged(m -> m .deferred(0) - .timestamp(now().toEpochMilli()) + .timestamp(System.currentTimeMillis()) .partition(p -> 
p.partitionId(-1).partitionOffset(-1)) .key(b -> b.length(willSignalKey.length()) .value(willSignalKey.value(), 0, willSignalKey.length())) .hashKey(b -> b.length(clientId.length()) .value(clientId.value(), 0, clientId.length())) .headersItem(h -> - h.nameLen(TYPE_HEADER_NAME.length()) - .name(TYPE_HEADER_NAME.value(), 0, TYPE_HEADER_NAME.length()) - .valueLen(WILL_SIGNAL_NAME.length()) - .value(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length()))) + h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) + .name(TYPE_HEADER_NAME_OCTETS) + .valueLen(WILL_SIGNAL_NAME_OCTETS.sizeof()) + .value(WILL_SIGNAL_NAME_OCTETS))) .build(); - final MqttWillSignalFW willSignal = - mqttWillSignalRW.wrap(willSignalBuffer, 0, willSignalBuffer.capacity()) - .clientId(clientId) - .delay(delay) - .deliverAt(MqttWillDeliverAt.UNKNOWN.value()) - .lifetimeId(lifetimeId) - .willId(willId) - .instanceId(instanceId.instanceId()) + final MqttSessionSignalFW willSignal = + mqttSessionSignalRW.wrap(sessionSignalBuffer, 0, sessionSignalBuffer.capacity()) + .will(w -> w + .instanceId(instanceId.instanceId()) + .clientId(clientId) + .delay(delay) + .deliverAt(MqttTime.UNKNOWN.value()) + .lifetimeId(lifetimeId) + .willId(willId)) .build(); session.doKafkaData(traceId, authorization, budgetId, willSignal.sizeof(), flags, willSignal, willSignalKafkaDataEx); + doFlushProduceAndFetchWithFilter(traceId, authorization, budgetId); break; case STATE: @@ -539,7 +565,7 @@ private void onMqttData( .typeId(kafkaTypeId) .merged(m -> m .deferred(0) - .timestamp(now().toEpochMilli()) + .timestamp(System.currentTimeMillis()) .partition(p -> p.partitionId(-1).partitionOffset(-1)) .key(b -> b.length(clientId.length()) .value(clientId.value(), 0, clientId.length()))) @@ -611,7 +637,7 @@ private void onMqttEnd( .typeId(kafkaTypeId) .merged(m -> m .deferred(0) - .timestamp(now().toEpochMilli()) + .timestamp(System.currentTimeMillis()) .partition(p -> p.partitionId(-1).partitionOffset(-1)) .key(b -> b.length(key.length()) 
.value(key.value(), 0, key.length())) @@ -623,30 +649,40 @@ private void onMqttEnd( null, kafkaWillDataEx); String16FW willSignalKey = new String16FW.Builder() - .wrap(willSignalKeyBuffer, 0, willSignalKeyBuffer.capacity()) + .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) .set(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); Flyweight willSignalKafkaDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) .merged(m -> m .deferred(0) - .timestamp(now().toEpochMilli()) + .timestamp(System.currentTimeMillis()) .partition(p -> p.partitionId(-1).partitionOffset(-1)) .key(b -> b.length(willSignalKey.length()) .value(willSignalKey.value(), 0, willSignalKey.length())) .hashKey(b -> b.length(clientId.length()) .value(clientId.value(), 0, clientId.length())) .headersItem(h -> - h.nameLen(TYPE_HEADER_NAME.length()) - .name(TYPE_HEADER_NAME.value(), 0, TYPE_HEADER_NAME.length()) - .valueLen(WILL_SIGNAL_NAME.length()) - .value(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length()))) + h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) + .name(TYPE_HEADER_NAME_OCTETS) + .valueLen(WILL_SIGNAL_NAME_OCTETS.sizeof()) + .value(WILL_SIGNAL_NAME_OCTETS))) .build(); session.doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, null, willSignalKafkaDataEx); } + final MqttSessionSignalFW expirySignal = + mqttSessionSignalRW.wrap(sessionSignalBuffer, 0, sessionSignalBuffer.capacity()) + .expiry(w -> w + .instanceId(instanceId.instanceId()) + .clientId(clientId) + .delay(sessionExpiryMillis) + .expireAt(supplyTime.getAsLong() + sessionExpiryMillis)) + .build(); + session.sendExpirySignal(authorization, traceId, expirySignal); // expire at expireAt + session.doKafkaEnd(traceId, authorization); if (group != null) { @@ -674,6 +710,16 @@ private void onMqttAbort( { session.sendWillSignal(traceId, authorization); } + final MqttSessionSignalFW expirySignal = + mqttSessionSignalRW.wrap(sessionSignalBuffer, 0, 
sessionSignalBuffer.capacity()) + .expiry(w -> w + .instanceId(instanceId.instanceId()) + .clientId(clientId) + .delay(sessionExpiryMillis) + .expireAt(supplyTime.getAsLong() + sessionExpiryMillis)) + .build(); + session.sendExpirySignal(authorization, traceId, expirySignal); // expire at expireAt + session.doKafkaAbort(traceId, authorization); if (group != null) { @@ -742,7 +788,8 @@ private void onMqttWindow( private void doMqttBegin( long traceId, long authorization, - long affinity) + long affinity, + Flyweight extension) { if (!MqttKafkaState.replyOpening(state)) { @@ -752,7 +799,7 @@ private void doMqttBegin( state = MqttKafkaState.openingReply(state); doBegin(mqtt, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, affinity); + traceId, authorization, affinity, extension); } } @@ -827,7 +874,7 @@ private void doMqttWindow( int padding, int capabilities) { - initialAck = session.initialAck; + initialAck = session.initialAck - padding; initialMax = session.initialMax; doWindow(mqtt, originId, routedId, initialId, initialSeq, initialAck, initialMax, @@ -847,7 +894,7 @@ private void doMqttReset( } } - public final class KafkaWillProxy + public final class KafkaSignalStream { private MessageConsumer kafka; private final long originId; @@ -857,7 +904,8 @@ public final class KafkaWillProxy private final String16FW sessionsTopic; private final String16FW messagesTopic; private final String16FW retainedTopic; - private final Object2ObjectHashMap willFetchers; + private final Object2ObjectHashMap willFetchers; + private final Int2ObjectHashMap expiryClientIds; private IntHashSet partitions; private int state; @@ -867,7 +915,7 @@ public final class KafkaWillProxy private int replyMax; private long reconnectAt; - private KafkaWillProxy( + private KafkaSignalStream( long originId, long routedId, String16FW sessionsTopic, @@ -882,6 +930,7 @@ private KafkaWillProxy( this.retainedTopic = retainedTopic; this.replyId = 
supplyReplyId.applyAsLong(initialId); this.willFetchers = new Object2ObjectHashMap<>(); + this.expiryClientIds = new Int2ObjectHashMap<>(); this.partitions = new IntHashSet(); } @@ -906,7 +955,7 @@ private void doKafkaBegin( state = MqttKafkaState.openingInitial(state); - kafka = newWillStream(this::onWillMessage, originId, routedId, initialId, 0, 0, 0, + kafka = newSignalStream(this::onSignalMessage, originId, routedId, initialId, 0, 0, 0, traceId, authorization, affinity, sessionsTopic); } @@ -937,7 +986,7 @@ private void doKafkaAbort( } } - private void onWillMessage( + private void onSignalMessage( int msgTypeId, DirectBuffer buffer, int index, @@ -969,9 +1018,48 @@ private void onWillMessage( final ResetFW reset = resetRO.wrap(buffer, index, index + length); onKafkaReset(reset); break; + case SignalFW.TYPE_ID: + final SignalFW signal = signalRO.wrap(buffer, index, index + length); + onSignal(signal); + break; + } + } + + private void onSignal(SignalFW signal) + { + final int signalId = signal.signalId(); + + switch (signalId) + { + case SIGNAL_EXPIRE_SESSION: + onKafkaSessionExpirySignal(signal); + break; + default: + break; } } + private void onKafkaSessionExpirySignal( + SignalFW signal) + { + String16FW clientId = expiryClientIds.get(signal.contextId()); + + Flyweight expireSessionKafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(System.currentTimeMillis()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(clientId.length()) + .value(clientId.value(), 0, clientId.length())) + .hashKey(b -> b.length(clientId.length()) + .value(clientId.value(), 0, clientId.length()))) + .build(); + + doKafkaData(supplyTraceId.get(), 0, expireSessionKafkaDataEx); + } + private void onKafkaBegin( BeginFW begin) { @@ -1026,49 +1114,84 @@ private void onKafkaData( kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? 
kafkaDataEx.merged() : null; final KafkaKeyFW key = kafkaMergedDataEx != null ? kafkaMergedDataEx.key() : null; - fetchWill: + reactToSignal: if (key != null) { if (payload == null) { + final OctetsFW type = kafkaMergedDataEx.headers() + .matchFirst(h -> h.name().equals(TYPE_HEADER_NAME_OCTETS)).value(); + + final String keyPostfix = type.equals(WILL_SIGNAL_NAME_OCTETS) ? + WILL_SIGNAL_KEY_POSTFIX : EXPIRY_SIGNAL_KEY_POSTFIX; + final String clientId0 = key.value() - .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o - WILL_SIGNAL_KEY_POSTFIX.length())); - String16FW clientId = new String16FW(clientId0); - if (willDeliverIds.containsKey(clientId)) + .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o - keyPostfix.length())); + final String16FW clientId = new String16FW(clientId0); + + if (type.equals(WILL_SIGNAL_NAME_OCTETS) && willDeliverIds.containsKey(clientId)) { willDeliverIds.get(clientId).forEach(signaler::cancel); - KafkaFetchWillProxy willFetcher = willFetchers.get(clientId); + KafkaFetchWillStream willFetcher = willFetchers.get(clientId); if (willFetcher != null) { willFetcher.cleanup(traceId, authorization); } } - break fetchWill; + else if (type.equals(EXPIRY_SIGNAL_NAME_OCTETS) && sessionExpiryIds.containsKey(clientId)) + { + signaler.cancel(sessionExpiryIds.get(clientId)); + } + + break reactToSignal; } - MqttWillSignalFW willSignal = - mqttWillSignalRO.tryWrap(payload.buffer(), payload.offset(), payload.limit()); - if (willSignal != null) + final MqttSessionSignalFW sessionSignal = + mqttSessionSignalRO.wrap(payload.buffer(), payload.offset(), payload.limit()); + + switch (sessionSignal.kind()) { + case MqttSessionSignalFW.KIND_WILL: + final MqttWillSignalFW willSignal = sessionSignal.will(); long deliverAt = willSignal.deliverAt(); - final String16FW clientId = willSignal.clientId(); + final String16FW willClientId = willSignal.clientId(); - if (deliverAt == MqttWillDeliverAt.UNKNOWN.value()) + if (deliverAt == MqttTime.UNKNOWN.value()) { - 
if (!instanceId.instanceId().equals(willSignal.instanceId())) + if (instanceId.instanceId().equals(willSignal.instanceId())) { - deliverAt = supplyTime.getAsLong() + willSignal.delay(); + break reactToSignal; } - else + deliverAt = supplyTime.getAsLong() + willSignal.delay(); + } + + KafkaFetchWillStream willFetcher = + new KafkaFetchWillStream(originId, routedId, this, sessionsTopic, willClientId, + willSignal.willId().asString(), willSignal.lifetimeId().asString(), deliverAt); + willFetcher.doKafkaBegin(traceId, authorization, 0, willSignal.lifetimeId()); + willFetchers.put(new String16FW(willClientId.asString()), willFetcher); + break; + case MqttSessionSignalFW.KIND_EXPIRY: + final MqttExpirySignalFW expirySignal = sessionSignal.expiry(); + long expireAt = expirySignal.expireAt(); + final String16FW expiryClientId = expirySignal.clientId(); + + if (expireAt == MqttTime.UNKNOWN.value()) + { + if (instanceId.instanceId().equals(expirySignal.instanceId())) { - break fetchWill; + break reactToSignal; } + expireAt = supplyTime.getAsLong() + expirySignal.delay(); } - KafkaFetchWillProxy willFetcher = new KafkaFetchWillProxy(originId, routedId, this, sessionsTopic, - clientId, willSignal.willId().asString(), willSignal.lifetimeId().asString(), deliverAt); - willFetcher.doKafkaBegin(traceId, authorization, 0, willSignal.lifetimeId()); - willFetchers.put(clientId, willFetcher); + final int contextId = contextCounter.incrementAndGet(); + expiryClientIds.put(contextId, expiryClientId); + + final long signalId = + signaler.signalAt(expireAt, originId, routedId, initialId, SIGNAL_EXPIRE_SESSION, contextId); + sessionExpiryIds.put(expiryClientId, signalId); + break; } } } @@ -1238,9 +1361,9 @@ private void doKafkaData( } } - private final class KafkaFetchWillProxy + private final class KafkaFetchWillStream { - private final KafkaWillProxy delegate; + private final KafkaSignalStream delegate; private final String16FW topic; private final String16FW clientId; private final 
String lifetimeId; @@ -1266,14 +1389,14 @@ private final class KafkaFetchWillProxy private int dataSlot = NO_SLOT; private int messageSlotOffset; private int messageSlotReserved; - private KafkaProduceWillProxy willProducer; - private KafkaProduceWillProxy willRetainProducer; + private KafkaProduceWillStream willProducer; + private KafkaProduceWillStream willRetainProducer; private int willMessageAckCount; - private KafkaFetchWillProxy( + private KafkaFetchWillStream( long originId, long routedId, - KafkaWillProxy delegate, + KafkaSignalStream delegate, String16FW topic, String16FW clientId, String willId, @@ -1437,7 +1560,7 @@ private void onKafkaData( if (key != null && payload != null) { MqttWillMessageFW willMessage = - mqttWillRO.tryWrap(payload.buffer(), payload.offset(), payload.limit()); + mqttWillRO.wrap(payload.buffer(), payload.offset(), payload.limit()); if (willId.equals(willMessage.willId().asString())) { @@ -1458,13 +1581,13 @@ private void onKafkaData( messageSlotReserved = willMessage.sizeof(); willProducer = - new KafkaProduceWillProxy(originId, routedId, this, delegate.messagesTopic, deliverAt); + new KafkaProduceWillStream(originId, routedId, this, delegate.messagesTopic, deliverAt); willProducer.doKafkaBegin(traceId, authorization, 0); willMessageAckCount++; if ((willMessage.flags() & 1 << MqttPublishFlags.RETAIN.value()) != 0) { willRetainProducer = - new KafkaProduceWillProxy(originId, routedId, this, delegate.retainedTopic, deliverAt); + new KafkaProduceWillStream(originId, routedId, this, delegate.retainedTopic, deliverAt); willRetainProducer.doKafkaBegin(traceId, authorization, 0); willMessageAckCount++; } @@ -1553,7 +1676,7 @@ private void onWillMessageAcked( .typeId(kafkaTypeId) .merged(m -> m .deferred(0) - .timestamp(now().toEpochMilli()) + .timestamp(System.currentTimeMillis()) .partition(p -> p.partitionId(-1).partitionOffset(-1)) .key(b -> b.length(key.length()) .value(key.value(), 0, key.length())) @@ -1564,24 +1687,24 @@ private 
void onWillMessageAcked( delegate.doKafkaData(traceId, authorization, kafkaWillDataEx); String16FW willSignalKey = new String16FW.Builder() - .wrap(willSignalKeyBuffer, 0, willSignalKeyBuffer.capacity()) + .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) .set(clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); Flyweight willSignalKafkaDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) .merged(m -> m .deferred(0) - .timestamp(now().toEpochMilli()) + .timestamp(System.currentTimeMillis()) .partition(p -> p.partitionId(-1).partitionOffset(-1)) .key(b -> b.length(willSignalKey.length()) .value(willSignalKey.value(), 0, willSignalKey.length())) .hashKey(b -> b.length(clientId.length()) .value(clientId.value(), 0, clientId.length())) .headersItem(h -> - h.nameLen(TYPE_HEADER_NAME.length()) - .name(TYPE_HEADER_NAME.value(), 0, TYPE_HEADER_NAME.length()) - .valueLen(WILL_SIGNAL_NAME.length()) - .value(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length()))) + h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) + .name(TYPE_HEADER_NAME_OCTETS) + .valueLen(WILL_SIGNAL_NAME_OCTETS.sizeof()) + .value(WILL_SIGNAL_NAME_OCTETS))) .build(); delegate.doKafkaData(traceId, authorization, willSignalKafkaDataEx); @@ -1591,7 +1714,7 @@ private void onWillMessageAcked( } } - private final class KafkaProduceWillProxy + private final class KafkaProduceWillStream { private MessageConsumer kafka; private final long originId; @@ -1600,7 +1723,7 @@ private final class KafkaProduceWillProxy private final String16FW kafkaTopic; private final long deliverAt; private final long replyId; - private final KafkaFetchWillProxy delegate; + private final KafkaFetchWillStream delegate; private int state; @@ -1613,10 +1736,10 @@ private final class KafkaProduceWillProxy private int replyMax; private int replyPad; - private KafkaProduceWillProxy( + private KafkaProduceWillStream( long originId, long routedId, - KafkaFetchWillProxy 
delegate, + KafkaFetchWillStream delegate, String16FW kafkaTopic, long deliverAt) { @@ -1705,12 +1828,13 @@ private void onKafkaMessage( break; case SignalFW.TYPE_ID: final SignalFW signal = signalRO.wrap(buffer, index, index + length); - onSignal(signal); + onKafkaSignal(signal); break; } } - private void onSignal(SignalFW signal) + private void onKafkaSignal( + SignalFW signal) { final int signalId = signal.signalId(); @@ -1886,7 +2010,7 @@ private void sendWill( .typeId(kafkaTypeId) .merged(m -> m .deferred(0) - .timestamp(now().toEpochMilli()) + .timestamp(System.currentTimeMillis()) .partition(p -> p.partitionId(-1).partitionOffset(-1)) .key(b -> b.set(key)) .headers(kafkaHeadersRW.build())) @@ -2020,7 +2144,7 @@ private static boolean isSetCleanStart( return (flags & MqttSessionFlags.CLEAN_START.value() << 1) != 0; } - private abstract class KafkaSessionProxy + private abstract class KafkaSessionStream { protected MessageConsumer kafka; protected final long originId; @@ -2040,7 +2164,7 @@ private abstract class KafkaSessionProxy protected int replyMax; protected int replyPad; - private KafkaSessionProxy( + private KafkaSessionStream( long originId, long routedId, MqttSessionProxy delegate) @@ -2063,7 +2187,7 @@ private void doKafkaBeginIfNecessary( } } - protected void doKafkaData( + protected final void doKafkaData( long traceId, long authorization, long budgetId, @@ -2081,46 +2205,107 @@ protected void doKafkaData( assert initialSeq <= initialAck + initialMax; } + protected final void cancelExpirySignal( + long authorization, + long traceId) + { + String16FW expirySignalKey = new String16FW.Builder() + .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) + .set(delegate.clientId.asString() + EXPIRY_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); + Flyweight expirySignalKafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(System.currentTimeMillis()) + 
.partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(expirySignalKey.length()) + .value(expirySignalKey.value(), 0, expirySignalKey.length())) + .hashKey(b -> b.length(delegate.clientId.length()) + .value(delegate.clientId.value(), 0, delegate.clientId.length())) + .headersItem(h -> + h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) + .name(TYPE_HEADER_NAME_OCTETS) + .valueLen(EXPIRY_SIGNAL_NAME_OCTETS.sizeof()) + .value(EXPIRY_SIGNAL_NAME_OCTETS))) + .build(); + + doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, + null, expirySignalKafkaDataEx); + } + + protected final void sendExpirySignal( + long authorization, + long traceId, + Flyweight payload) + { + String16FW expirySignalKey = new String16FW.Builder() + .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) + .set(delegate.clientId.asString() + EXPIRY_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); + Flyweight expirySignalKafkaDataEx = kafkaDataExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .merged(m -> m + .deferred(0) + .timestamp(System.currentTimeMillis()) + .partition(p -> p.partitionId(-1).partitionOffset(-1)) + .key(b -> b.length(expirySignalKey.length()) + .value(expirySignalKey.value(), 0, expirySignalKey.length())) + .hashKey(b -> b.length(delegate.clientId.length()) + .value(delegate.clientId.value(), 0, delegate.clientId.length())) + .headersItem(h -> + h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) + .name(TYPE_HEADER_NAME_OCTETS) + .valueLen(EXPIRY_SIGNAL_NAME_OCTETS.sizeof()) + .value(EXPIRY_SIGNAL_NAME_OCTETS))) + .build(); + + + doKafkaData(traceId, authorization, 0, payload.sizeof(), DATA_FLAG_COMPLETE, + payload, expirySignalKafkaDataEx); + } + private void sendWillSignal( long traceId, long authorization) { String16FW willSignalKey = new String16FW.Builder() - .wrap(willSignalKeyBuffer, 0, willSignalKeyBuffer.capacity()) + .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) 
.set(delegate.clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); Flyweight willSignalKafkaDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) .merged(m -> m .deferred(0) - .timestamp(now().toEpochMilli()) + .timestamp(System.currentTimeMillis()) .partition(p -> p.partitionId(-1).partitionOffset(-1)) .key(b -> b.length(willSignalKey.length()) .value(willSignalKey.value(), 0, willSignalKey.length())) .hashKey(b -> b.length(delegate.clientId.length()) .value(delegate.clientId.value(), 0, delegate.clientId.length())) .headersItem(h -> - h.nameLen(TYPE_HEADER_NAME.length()) - .name(TYPE_HEADER_NAME.value(), 0, TYPE_HEADER_NAME.length()) - .valueLen(WILL_SIGNAL_NAME.length()) - .value(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length()))) + h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) + .name(TYPE_HEADER_NAME_OCTETS) + .valueLen(WILL_SIGNAL_NAME_OCTETS.sizeof()) + .value(WILL_SIGNAL_NAME_OCTETS))) .build(); - final MqttWillSignalFW willSignal = - mqttWillSignalRW.wrap(willSignalBuffer, 0, willSignalBuffer.capacity()) - .clientId(delegate.clientId) - .delay(delegate.delay) - .deliverAt(supplyTime.getAsLong() + delegate.delay) - .lifetimeId(delegate.lifetimeId) - .willId(delegate.willId) - .instanceId(instanceId.instanceId()) - .build(); + final MqttSessionSignalFW willSignal = + mqttSessionSignalRW.wrap(sessionSignalBuffer, 0, sessionSignalBuffer.capacity()) + .will(w -> w + .instanceId(instanceId.instanceId()) + .clientId(delegate.clientId) + .delay(delegate.delay) + .deliverAt(supplyTime.getAsLong() + delegate.delay) + .lifetimeId(delegate.lifetimeId) + .willId(delegate.willId)) + .build(); doKafkaData(traceId, authorization, 0, willSignal.sizeof(), DATA_FLAG_COMPLETE, willSignal, willSignalKafkaDataEx); } - private void doKafkaData( + protected void doKafkaData( long traceId, long authorization, long budgetId, @@ -2243,7 +2428,10 @@ private void onKafkaBegin( assert replyAck <= replySeq; - 
delegate.doMqttBegin(traceId, authorization, affinity); + if (isSetWillFlag(delegate.sessionFlags)) + { + delegate.doMqttBegin(traceId, authorization, affinity, EMPTY_OCTETS); + } doKafkaWindow(traceId, authorization, 0, 0); } @@ -2336,7 +2524,7 @@ protected void sendMigrateSignal(long authorization, long traceId) .typeId(kafkaTypeId) .merged(m -> m .deferred(0) - .timestamp(now().toEpochMilli()) + .timestamp(System.currentTimeMillis()) .partition(p -> p.partitionId(-1).partitionOffset(-1)) .key(b -> b.length(delegate.clientIdMigrate.length()) .value(delegate.clientIdMigrate.value(), 0, delegate.clientIdMigrate.length())) @@ -2352,7 +2540,7 @@ protected void sendMigrateSignal(long authorization, long traceId) EMPTY_OCTETS, kafkaMigrateDataEx); } - private void onKafkaReset( + protected void onKafkaReset( ResetFW reset) { final long sequence = reset.sequence(); @@ -2360,11 +2548,6 @@ private void onKafkaReset( final long traceId = reset.traceId(); assert acknowledge <= sequence; - assert acknowledge >= delegate.initialAck; - - delegate.initialAck = acknowledge; - - assert delegate.initialAck <= delegate.initialSeq; final OctetsFW extension = reset.extension(); final ExtensionFW resetEx = extension.get(extensionRO::tryWrap); @@ -2413,9 +2596,9 @@ private void doKafkaWindow( } } - private final class KafkaSessionSignalProxy extends KafkaSessionProxy + private final class KafkaSessionSignalStream extends KafkaSessionStream { - private KafkaSessionSignalProxy( + private KafkaSessionSignalStream( long originId, long routedId, MqttSessionProxy delegate) @@ -2483,17 +2666,17 @@ protected void onKafkaWindow( if (!wasOpen) { - sendMigrateSignal(authorization, traceId); - final long routedId = delegate.session.routedId; - delegate.group = new KafkaGroupProxy(originId, routedId, delegate); + delegate.group = new KafkaGroupStream(originId, routedId, delegate); delegate.group.doKafkaBegin(traceId, authorization, 0); + + sendMigrateSignal(authorization, traceId); } } } - private 
final class KafkaSessionStateProxy extends KafkaSessionProxy + private final class KafkaSessionStateProxy extends KafkaSessionStream { private KafkaSessionStateProxy( long originId, @@ -2542,15 +2725,16 @@ protected void handleKafkaData( kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; final KafkaKeyFW key = kafkaMergedDataEx != null ? kafkaMergedDataEx.key() : null; - if (key != null) + if (key != null && payload != null) { - if (key.length() == (delegate.clientId.length())) + int keyLen = key.length(); + if (keyLen == delegate.clientId.length()) { MqttSessionStateFW sessionState = - mqttSessionStateRO.tryWrap(payload.buffer(), payload.offset(), payload.limit()); + mqttSessionStateRO.wrap(payload.buffer(), payload.offset(), payload.limit()); delegate.doMqttData(traceId, authorization, budgetId, reserved, flags, sessionState); } - else if (key.length() == delegate.clientIdMigrate.length()) + else if (keyLen == delegate.clientIdMigrate.length()) { delegate.group.doKafkaFlush(traceId, authorization, budgetId, reserved); } @@ -2581,35 +2765,51 @@ protected void onKafkaWindow( assert initialAck <= initialSeq; - if (!wasOpen && !isSetCleanStart(delegate.sessionFlags)) + if (!wasOpen) { - cancelWillSignal(authorization, traceId); + if (!isSetCleanStart(delegate.sessionFlags)) + { + cancelWillSignal(authorization, traceId); + } + cancelExpirySignal(authorization, traceId); // expiry cancellation + + final MqttSessionSignalFW expirySignal = + mqttSessionSignalRW.wrap(sessionSignalBuffer, 0, sessionSignalBuffer.capacity()) + .expiry(w -> w + .instanceId(instanceId.instanceId()) + .clientId(delegate.clientId) + .delay(delegate.sessionExpiryMillis) + .expireAt(MqttTime.UNKNOWN.value())) + .build(); + sendExpirySignal(authorization, traceId, expirySignal); // expire later } - delegate.doMqttWindow(authorization, traceId, budgetId, padding + delegate.willPadding, capabilities); + delegate.doMqttWindow(authorization, traceId, 
budgetId, padding + delegate.sessionPadding, capabilities); } - private void cancelWillSignal(long authorization, long traceId) + private void cancelWillSignal( + long authorization, + long traceId) { String16FW willSignalKey = new String16FW.Builder() - .wrap(willSignalKeyBuffer, 0, willSignalKeyBuffer.capacity()) + .wrap(sessionSignalKeyBuffer, 0, sessionSignalKeyBuffer.capacity()) .set(delegate.clientId.asString() + WILL_SIGNAL_KEY_POSTFIX, StandardCharsets.UTF_8).build(); Flyweight willSignalKafkaDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) .merged(m -> m .deferred(0) - .timestamp(now().toEpochMilli()) + .timestamp(System.currentTimeMillis()) .partition(p -> p.partitionId(-1).partitionOffset(-1)) .key(b -> b.length(willSignalKey.length()) .value(willSignalKey.value(), 0, willSignalKey.length())) .hashKey(b -> b.length(delegate.clientId.length()) .value(delegate.clientId.value(), 0, delegate.clientId.length())) .headersItem(h -> - h.nameLen(TYPE_HEADER_NAME.length()) - .name(TYPE_HEADER_NAME.value(), 0, TYPE_HEADER_NAME.length()) - .valueLen(WILL_SIGNAL_NAME.length()) - .value(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length()))) + h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) + .name(TYPE_HEADER_NAME_OCTETS) + .valueLen(WILL_SIGNAL_NAME_OCTETS.sizeof()) + .value(WILL_SIGNAL_NAME_OCTETS))) .build(); doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, @@ -2658,9 +2858,9 @@ protected void onKafkaEnd( } } - private final class KafkaFetchWillSignalProxy extends KafkaSessionProxy + private final class KafkaFetchWillSignalStream extends KafkaSessionStream { - private KafkaFetchWillSignalProxy( + private KafkaFetchWillSignalStream( long originId, long routedId, MqttSessionProxy delegate) @@ -2696,11 +2896,14 @@ protected void handleKafkaData( kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaDataEx.merged() : null; final KafkaKeyFW key = kafkaMergedDataEx != null ? 
kafkaMergedDataEx.key() : null; - if (key != null) + if (key != null && payload != null) { - MqttWillSignalFW willMessage = - mqttWillSignalRO.tryWrap(payload.buffer(), payload.offset(), payload.limit()); - delegate.lifetimeId = willMessage.lifetimeId().asString(); + MqttSessionSignalFW sessionSignal = + mqttSessionSignalRO.wrap(payload.buffer(), payload.offset(), payload.limit()); + if (sessionSignal != null) + { + delegate.lifetimeId = sessionSignal.will().lifetimeId().asString(); + } } } @@ -2723,12 +2926,12 @@ protected void onKafkaFlush( delegate.session.doKafkaEnd(traceId, authorization); final long routedId = delegate.session.routedId; - delegate.session = new KafkaSessionSignalProxy(originId, routedId, delegate); + delegate.session = new KafkaSessionSignalStream(originId, routedId, delegate); delegate.session.doKafkaBeginIfNecessary(traceId, authorization, 0); } } - private final class KafkaGroupProxy + private final class KafkaGroupStream { private MessageConsumer kafka; private final long originId; @@ -2748,7 +2951,7 @@ private final class KafkaGroupProxy private int replyMax; private int replyPad; - private KafkaGroupProxy( + private KafkaGroupStream( long originId, long routedId, MqttSessionProxy delegate) @@ -2868,7 +3071,23 @@ private void onKafkaBegin( assert replyAck <= replySeq; - delegate.doMqttBegin(traceId, authorization, affinity); + Flyweight mqttBeginEx = EMPTY_OCTETS; + + final int sessionExpiryMillisInRange = + Math.max(sessionExpiryIntervalMinMillis, Math.min(sessionExpiryIntervalMaxMillis, delegate.sessionExpiryMillis)); + if (delegate.sessionExpiryMillis != sessionExpiryMillisInRange) + { + mqttBeginEx = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) + .typeId(mqttTypeId) + .session(sessionBuilder -> sessionBuilder + .flags(delegate.sessionFlags) + .expiry((int) TimeUnit.MILLISECONDS.toSeconds(sessionExpiryMillisInRange)) + .clientId(delegate.clientId)) + .build(); + delegate.sessionExpiryMillis = 
sessionExpiryMillisInRange; + } + + delegate.doMqttBegin(traceId, authorization, affinity, mqttBeginEx); doKafkaWindow(traceId, authorization, 0, 0, 0); } @@ -2970,11 +3189,6 @@ private void onKafkaReset( final long traceId = reset.traceId(); assert acknowledge <= sequence; - assert acknowledge >= delegate.initialAck; - - delegate.initialAck = acknowledge; - - assert delegate.initialAck <= delegate.initialSeq; delegate.doMqttReset(traceId, EMPTY_OCTETS); } @@ -3017,7 +3231,8 @@ private void doBegin( int maximum, long traceId, long authorization, - long affinity) + long affinity, + Flyweight extension) { final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) .originId(originId) @@ -3029,6 +3244,7 @@ private void doBegin( .traceId(traceId) .authorization(authorization) .affinity(affinity) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) .build(); receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); @@ -3414,7 +3630,7 @@ private MessageConsumer newKafkaStream( } - private MessageConsumer newWillStream( + private MessageConsumer newSignalStream( MessageConsumer sender, long originId, long routedId, @@ -3436,10 +3652,16 @@ private MessageConsumer newWillStream( .groupId(MQTT_CLIENTS_GROUP_ID) .filtersItem(f -> f.conditionsItem(c -> c.header(h -> - h.nameLen(TYPE_HEADER_NAME.length()) - .name(TYPE_HEADER_NAME.value(), 0, TYPE_HEADER_NAME.length()) - .valueLen(WILL_SIGNAL_NAME.length()) - .value(WILL_SIGNAL_NAME.value(), 0, WILL_SIGNAL_NAME.length())))) + h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) + .name(TYPE_HEADER_NAME_OCTETS) + .valueLen(WILL_SIGNAL_NAME_OCTETS.sizeof()) + .value(WILL_SIGNAL_NAME_OCTETS)))) + .filtersItem(f -> + f.conditionsItem(c -> c.header(h -> + h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) + .name(TYPE_HEADER_NAME_OCTETS) + .valueLen(EXPIRY_SIGNAL_NAME_OCTETS.sizeof()) + .value(EXPIRY_SIGNAL_NAME_OCTETS)))) .ackMode(b -> b.set(KAFKA_DEFAULT_ACK_MODE))) .build(); diff --git 
a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java index cc7a4145c3..40cbaa208b 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java @@ -19,6 +19,8 @@ import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.LIFETIME_ID; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.MESSAGES_TOPIC; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.RETAINED_MESSAGES_TOPIC; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.SESSION_EXPIRY_INTERVAL_MAX; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.SESSION_EXPIRY_INTERVAL_MIN; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.SESSION_ID; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.TIME; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.WILL_AVAILABLE; @@ -39,6 +41,8 @@ public class MqttKafkaConfigurationTest public static final String WILL_ID_NAME = "zilla.binding.mqtt.kafka.will.id"; public static final String LIFETIME_ID_NAME = "zilla.binding.mqtt.kafka.lifetime.id"; public static final String INSTANCE_ID_NAME = "zilla.binding.mqtt.kafka.instance.id"; + public static final String SESSION_EXPIRY_INTERVAL_MAX_NAME = "zilla.binding.mqtt.kafka.session.expiry.interval.max"; + public static final String SESSION_EXPIRY_INTERVAL_MIN_NAME = 
"zilla.binding.mqtt.kafka.session.expiry.interval.min"; @Test public void shouldVerifyConstants() @@ -52,5 +56,7 @@ public void shouldVerifyConstants() assertEquals(WILL_ID.name(), WILL_ID_NAME); assertEquals(LIFETIME_ID.name(), LIFETIME_ID_NAME); assertEquals(INSTANCE_ID.name(), INSTANCE_ID_NAME); + assertEquals(SESSION_EXPIRY_INTERVAL_MAX.name(), SESSION_EXPIRY_INTERVAL_MAX_NAME); + assertEquals(SESSION_EXPIRY_INTERVAL_MIN.name(), SESSION_EXPIRY_INTERVAL_MIN_NAME); } } diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java index b04b6c499c..be7f8a9369 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java @@ -16,6 +16,8 @@ import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.INSTANCE_ID_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.LIFETIME_ID_NAME; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.SESSION_EXPIRY_INTERVAL_MAX_NAME; +import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.SESSION_EXPIRY_INTERVAL_MIN_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.SESSION_ID_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.TIME_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.WILL_AVAILABLE_NAME; @@ -53,6 +55,16 @@ public class MqttKafkaSessionProxyIT .counterValuesBufferCapacity(8192) 
.configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(ENGINE_DRAIN_ON_CLOSE, false) + .configure(SESSION_ID_NAME, + "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") + .configure(INSTANCE_ID_NAME, + "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") + .configure(TIME_NAME, + "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") + .configure(LIFETIME_ID_NAME, + "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") + .configure(WILL_ID_NAME, + "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/kafka/config") .external("kafka0") .clean(); @@ -63,8 +75,31 @@ public class MqttKafkaSessionProxyIT @Test @Configuration("proxy.yaml") @Configure(name = WILL_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") + @Configure(name = SESSION_EXPIRY_INTERVAL_MAX_NAME, value = "30000") + @Specification({ + "${mqtt}/session.connect.override.max.session.expiry/client", + "${kafka}/session.connect.override.max.session.expiry/server"}) + public void shouldConnectServerOverridesSessionExpiryTooBig() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") + @Configure(name = SESSION_EXPIRY_INTERVAL_MAX_NAME, value = "30000") + @Configure(name = SESSION_EXPIRY_INTERVAL_MIN_NAME, value = "2000") + @Specification({ + "${mqtt}/session.connect.override.min.session.expiry/client", + "${kafka}/session.connect.override.min.session.expiry/server"}) + public void shouldConnectServerOverridesSessionExpiryTooSmall() throws Exception + { + k3po.finish(); + } + + @Test + 
@Configuration("proxy.yaml") + @Configure(name = WILL_AVAILABLE_NAME, value = "false") @Specification({ "${mqtt}/session.abort.reconnect.non.clean.start/client", "${kafka}/session.abort.reconnect.non.clean.start/server"}) @@ -76,12 +111,6 @@ public void shouldReconnectNonCleanStart() throws Exception @Test @Configuration("proxy.yaml") @Configure(name = WILL_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") - @Configure(name = TIME_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") - @Configure(name = INSTANCE_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") @Specification({ "${mqtt}/session.client.takeover/client", "${kafka}/session.client.takeover/server"}) @@ -93,12 +122,6 @@ public void shouldTakeOverSession() throws Exception @Test @Configuration("proxy.yaml") @Configure(name = WILL_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") - @Configure(name = TIME_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") - @Configure(name = INSTANCE_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") @Specification({ "${mqtt}/session.exists.clean.start/client", "${kafka}/session.exists.clean.start/server"}) @@ -110,8 +133,6 @@ public void shouldRemoveSessionAtCleanStart() throws Exception @Test @Configuration("proxy.yaml") @Configure(name = WILL_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") 
@Specification({ "${mqtt}/session.subscribe/client", "${kafka}/session.subscribe/server"}) @@ -123,8 +144,6 @@ public void shouldSubscribeSaveSubscriptionsInSession() throws Exception @Test @Configuration("proxy.yaml") @Configure(name = WILL_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.subscribe.via.session.state/client", "${kafka}/session.subscribe.via.session.state/server"}) @@ -136,8 +155,6 @@ public void shouldReceiveMessageSubscribedViaSessionState() throws Exception @Test @Configuration("proxy.yaml") @Configure(name = WILL_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.unsubscribe.after.subscribe/client", "${kafka}/session.unsubscribe.after.subscribe/server"}) @@ -149,8 +166,6 @@ public void shouldUnsubscribeAndUpdateSessionState() throws Exception @Test @Configuration("proxy.yaml") @Configure(name = WILL_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.unsubscribe.via.session.state/client", "${kafka}/session.unsubscribe.via.session.state/server"}) @@ -162,8 +177,6 @@ public void shouldUnsubscribeViaSessionState() throws Exception @Test @Configuration("proxy.yaml") @Configure(name = WILL_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.client.sent.reset/client", "${kafka}/session.client.sent.reset/server"}) @@ -175,8 +188,6 @@ public void shouldSessionStreamReceiveClientSentReset() 
throws Exception @Test @Configuration("proxy.yaml") @Configure(name = WILL_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.server.sent.reset/client", "${kafka}/session.server.sent.reset/server"}) @@ -188,8 +199,6 @@ public void shouldSessionStreamReceiveServerSentReset() throws Exception @Test @Configuration("proxy.yaml") @Configure(name = WILL_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") @Specification({ "${mqtt}/session.server.sent.reset/client", "${kafka}/session.group.server.sent.reset/server"}) @@ -214,16 +223,35 @@ public void shouldRedirect() throws Exception @Test @Configuration("proxy.yaml") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") - @Configure(name = LIFETIME_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") - @Configure(name = WILL_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") - @Configure(name = INSTANCE_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") - @Configure(name = TIME_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") + @Specification({ + "${mqtt}/session.close.expire.session.state/client", + "${kafka}/session.close.expire.session.state/server"}) + public void shouldExpireSessionOnClose() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${mqtt}/session.abort.expire.session.state/client", + 
"${kafka}/session.abort.expire.session.state/server"}) + public void shouldExpireSessionOnAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") + @Specification({ + "${kafka}/session.cancel.session.expiry/server"}) + public void shouldCancelSessionExpiry() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("proxy.yaml") @Specification({ "${mqtt}/session.will.message.normal.disconnect/client", "${kafka}/session.will.message.normal.disconnect/server"}) @@ -234,16 +262,6 @@ public void shouldNotSendWillMessageOnNormalDisconnect() throws Exception @Test @Configuration("proxy.yaml") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") - @Configure(name = LIFETIME_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") - @Configure(name = WILL_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") - @Configure(name = INSTANCE_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") - @Configure(name = TIME_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") @Specification({ "${mqtt}/session.will.message.clean.start/client", "${kafka}/session.will.message.clean.start/server"}) @@ -254,16 +272,6 @@ public void shouldGenerateLifeTimeIdOnCleanStart() throws Exception @Test @Configuration("proxy.yaml") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") - @Configure(name = LIFETIME_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") - @Configure(name = WILL_ID_NAME, - value = 
"io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") - @Configure(name = INSTANCE_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") - @Configure(name = TIME_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") @Specification({ "${mqtt}/session.will.message.abort.deliver.will/client", "${kafka}/session.will.message.abort.deliver.will/server"}) @@ -274,19 +282,9 @@ public void shouldSendWillMessageOnAbort() throws Exception @Test @Configuration("proxy.yaml") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") - @Configure(name = LIFETIME_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") - @Configure(name = WILL_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") - @Configure(name = INSTANCE_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") - @Configure(name = TIME_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") @Specification({ "${mqtt}/session.will.message.abort.deliver.will/client", - "${kafka}/session.will.message.will.id.mismatch.no.deliver/server"}) + "${kafka}/session.will.message.will.id.mismatch.skip.delivery/server"}) public void shouldNotSendWillMessageOnWillIdMismatch() throws Exception { k3po.finish(); @@ -294,16 +292,6 @@ public void shouldNotSendWillMessageOnWillIdMismatch() throws Exception @Test @Configuration("proxy.yaml") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") - 
@Configure(name = LIFETIME_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") - @Configure(name = WILL_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") - @Configure(name = INSTANCE_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") - @Configure(name = TIME_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") @Specification({ "${mqtt}/session.will.message.abort.deliver.will.retain/client", "${kafka}/session.will.message.abort.deliver.will.retain/server"}) @@ -314,43 +302,23 @@ public void shouldSaveWillMessageAsRetain() throws Exception @Test @Configuration("proxy.yaml") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") - @Configure(name = LIFETIME_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") - @Configure(name = WILL_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") - @Configure(name = INSTANCE_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") - @Configure(name = TIME_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") @Specification({ - "${mqtt}/session.will.message.client.takeover.deliver.will/client", + "${mqtt}/session.will.message.takeover.deliver.will/client", "${kafka}/session.will.message.takeover.deliver.will/server"}) - public void shouldSendWillMessageOnAbortClientTakeover() throws Exception + public void shouldDeliverWillMessageOnSessionTakeover() throws Exception { k3po.finish(); } @Test 
@Configuration("proxy.yaml") - @Configure(name = SESSION_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplySessionId") - @Configure(name = LIFETIME_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyLifetimeId") - @Configure(name = WILL_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyWillId") - @Configure(name = INSTANCE_ID_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyInstanceId") - @Configure(name = TIME_NAME, - value = "io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionProxyIT::supplyTime") @Specification({ "${kafka}/session.will.message.cancel.delivery/server"}) public void shouldCancelWillDelivery() throws Exception { k3po.start(); Thread.sleep(1000); - k3po.notifyBarrier("WAIT_1_SECOND"); + k3po.notifyBarrier("ONE_SECOND_ELAPSED"); k3po.finish(); } diff --git a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java index 6cca025d73..dc8c6f553b 100644 --- a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java +++ b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java @@ -32,11 +32,14 @@ import io.aklivity.zilla.specs.binding.mqtt.internal.types.Array32FW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttBinaryFW; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttExpirySignalFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttPayloadFormat; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttPayloadFormatFW; import 
io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttPublishFlags; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttQoS; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSessionFlags; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSessionSignalFW; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSessionSignalType; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSessionStateFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSubscribeFlags; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttTopicFilterFW; @@ -122,9 +125,9 @@ public static MqttWillMessageBuilder will() } @Function - public static MqttWillSignalBuilder willSignal() + public static MqttSessionSignalBuilder sessionSignal() { - return new MqttWillSignalBuilder(); + return new MqttSessionSignalBuilder(); } @Function @@ -880,67 +883,149 @@ public byte[] build() } } - public static final class MqttWillSignalBuilder + public static final class MqttSessionSignalBuilder { - private final MqttWillSignalFW.Builder willSignalRW = new MqttWillSignalFW.Builder(); + private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); - private MqttWillSignalBuilder() - { - MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); - willSignalRW.wrap(writeBuffer, 0, writeBuffer.capacity()); - } + private final MqttSessionSignalFW signalRO = new MqttSessionSignalFW(); - public MqttWillSignalBuilder clientId( - String clientId) - { - willSignalRW.clientId(clientId); - return this; - } + private final MqttSessionSignalFW.Builder signalRW = new MqttSessionSignalFW.Builder(); - public MqttWillSignalBuilder delay( - int delay) + + private MqttSessionSignalBuilder() { - willSignalRW.delay(delay); - return this; + signalRW.wrap(writeBuffer, 0, writeBuffer.capacity()); } - public MqttWillSignalBuilder deliverAt( - long deliverAt) + public MqttSessionExpirySignalBuilder expiry() { - 
willSignalRW.deliverAt(deliverAt); - return this; + signalRW.kind(MqttSessionSignalType.EXPIRY.value()); + + return new MqttSessionExpirySignalBuilder(); } - public MqttWillSignalBuilder lifetimeId( - String lifetimeId) + public MqttSessionWillSignalBuilder will() { - willSignalRW.lifetimeId(lifetimeId); - return this; + signalRW.kind(MqttSessionSignalType.WILL.value()); + + return new MqttSessionWillSignalBuilder(); } - public MqttWillSignalBuilder willId( - String willId) + public byte[] build() { - willSignalRW.willId(willId); - return this; + final MqttSessionSignalFW signal = signalRO; + final byte[] array = new byte[signal.sizeof()]; + signal.buffer().getBytes(signal.offset(), array); + return array; } - public MqttWillSignalBuilder instanceId( - String instanceId) + + public final class MqttSessionWillSignalBuilder { - willSignalRW.instanceId(instanceId); - return this; + private final MqttWillSignalFW.Builder willSignalRW = new MqttWillSignalFW.Builder(); + + private MqttSessionWillSignalBuilder() + { + willSignalRW.wrap(writeBuffer, MqttSessionSignalFW.FIELD_OFFSET_WILL, writeBuffer.capacity()); + } + + public MqttSessionWillSignalBuilder clientId( + String clientId) + { + willSignalRW.clientId(clientId); + return this; + } + + public MqttSessionWillSignalBuilder delay( + int delay) + { + willSignalRW.delay(delay); + return this; + } + + public MqttSessionWillSignalBuilder deliverAt( + long deliverAt) + { + willSignalRW.deliverAt(deliverAt); + return this; + } + + public MqttSessionWillSignalBuilder lifetimeId( + String lifetimeId) + { + willSignalRW.lifetimeId(lifetimeId); + return this; + } + + public MqttSessionWillSignalBuilder willId( + String willId) + { + willSignalRW.willId(willId); + return this; + } + + public MqttSessionWillSignalBuilder instanceId( + String instanceId) + { + willSignalRW.instanceId(instanceId); + return this; + } + + public MqttSessionSignalBuilder build() + { + final MqttWillSignalFW willSignal = willSignalRW.build(); + 
signalRO.wrap(writeBuffer, 0, willSignal.limit()); + return MqttSessionSignalBuilder.this; + } } - public byte[] build() + public final class MqttSessionExpirySignalBuilder { - final MqttWillSignalFW willSignal = willSignalRW.build(); - final byte[] array = new byte[willSignal.sizeof()]; - willSignal.buffer().getBytes(willSignal.offset(), array); - return array; + private final MqttExpirySignalFW.Builder expirySignalRW = new MqttExpirySignalFW.Builder(); + + private MqttSessionExpirySignalBuilder() + { + expirySignalRW.wrap(writeBuffer, MqttSessionSignalFW.FIELD_OFFSET_EXPIRY, writeBuffer.capacity()); + } + + public MqttSessionExpirySignalBuilder clientId( + String clientId) + { + expirySignalRW.clientId(clientId); + return this; + } + + public MqttSessionExpirySignalBuilder delay( + int delay) + { + expirySignalRW.delay(delay); + return this; + } + + public MqttSessionExpirySignalBuilder expireAt( + long expireAt) + { + expirySignalRW.expireAt(expireAt); + return this; + } + + public MqttSessionExpirySignalBuilder instanceId( + String instanceId) + { + expirySignalRW.instanceId(instanceId); + return this; + } + + public MqttSessionSignalBuilder build() + { + final MqttExpirySignalFW expirySignal = expirySignalRW.build(); + signalRO.wrap(writeBuffer, 0, expirySignal.limit()); + return MqttSessionSignalBuilder.this; + } } } + public static final class MqttBeginExMatcherBuilder { private final DirectBuffer bufferRO = new UnsafeBuffer(); diff --git a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl index b1d22471d1..0a2ac23134 100644 --- a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl +++ b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl @@ -54,7 +54,7 @@ scope mqtt TEXT } - enum MqttWillDeliverAt (int64) + enum MqttTime (int64) { UNKNOWN(-1L) } @@ -85,14 +85,34 @@ scope mqtt MqttTopicFilter[] subscriptions; } + enum 
MqttSessionSignalType (uint8) + { + WILL (0), + EXPIRY (1) + } + + union MqttSessionSignal switch (uint8) + { + case 0: mqtt::MqttWillSignal will; + case 1: mqtt::MqttExpirySignal expiry; + } + struct MqttWillSignal { + string16 instanceId; string16 clientId; int32 delay = 0; int64 deliverAt = -1; string16 lifetimeId = null; string16 willId = null; + } + + struct MqttExpirySignal + { string16 instanceId; + string16 clientId; + int32 delay = 0; + int64 expireAt = -1; } struct MqttWillMessage diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt new file mode 100644 index 0000000000..27635e8ebe --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt @@ -0,0 +1,39 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(100) + .clientId("client-1") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(30) + .clientId("client-1") + .build() + .build()} + +connected + +read zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt new file mode 100644 index 0000000000..0ecc9f30b8 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt @@ -0,0 +1,42 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(100) + .clientId("client-1") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(30) + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.empty +write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt index 2e7698520a..3115255b0b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client") - .serverRef("localhost:1883") + .serverRef("mqtt-1.example.com:1883") .build() .build()} @@ -32,7 +32,7 @@ read zilla:data.empty read zilla:reset.ext ${mqtt:resetEx() .typeId(zilla:id("mqtt")) - .serverRef("localhost:1884") + .serverRef("mqtt-2.example.com:1883") .build()} write aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt index 41c25ac043..c0ec456d5e 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client") - .serverRef("localhost:1883") + .serverRef("mqtt-1.example.com:1883") .build() .build()} @@ -35,7 +35,7 @@ write flush write zilla:reset.ext ${mqtt:resetEx() .typeId(zilla:id("mqtt")) - .serverRef("localhost:1884") + .serverRef("mqtt-2.example.com:1883") .build()} read abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt index 421ed4976a..209a8946c2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client") - .serverRef("localhost:1883") + .serverRef("mqtt-1.example.com:1883") .build() .build()} @@ -30,7 +30,7 @@ connected read zilla:reset.ext ${mqtt:resetEx() .typeId(zilla:id("mqtt")) - .serverRef("localhost:1884") + .serverRef("mqtt-2.example.com:1883") .build()} write aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt index 6e8b43130e..b069bf351c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client") - .serverRef("localhost:1883") + .serverRef("mqtt-1.example.com:1883") .build() .build()} @@ -32,7 +32,7 @@ connected write zilla:reset.ext ${mqtt:resetEx() .typeId(zilla:id("mqtt")) - .serverRef("localhost:1884") + .serverRef("mqtt-2.example.com:1883") .build()} read abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/client.rpt new file mode 100644 index 0000000000..d5b670b0cc --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/client.rpt @@ -0,0 +1,47 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + +write [0xe0 0x07] # DISCONNECT + [0x00] # normal disconnect + [0x05] # properties + [0x11] 1 # session expiry = 1 + +read [0xe0 0x02] # DISCONNECT + [0x82] # reason = protocol error + [0x00] # properties = none + +read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/server.rpt new file mode 100644 index 0000000000..ef44a24f56 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/server.rpt @@ -0,0 +1,48 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + +read [0xe0 0x07] # DISCONNECT + [0x00] # normal disconnect + [0x05] # properties + [0x11] 1 # session expiry = 1 + +write [0xe0 0x02] # DISCONNECT + [0x82] # reason = protocol error + [0x00] # properties = none + +write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/client.rpt new file mode 100644 index 0000000000..8df72dba57 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/client.rpt @@ -0,0 +1,39 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x15] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x00] # flags = none + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x11] 100 # session expiry interval + [0x00 0x08] "client-1" # client id + +read [0x20 0x0d] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x0a] # properties + [0x27] 66560 # maximum packet size + [0x11] 30 # session expiry interval + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/server.rpt new file mode 100644 index 0000000000..1d4c90db61 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/server.rpt @@ -0,0 +1,39 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x15] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x00] # flags = none + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x11] 100 # session expiry interval + [0x00 0x08] "client-1" # client id + +write [0x20 0x0d] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x0a] # properties + [0x27] 66560 # maximum packet size + [0x11] 30 # session expiry interval diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt index c5ee483918..6cd3a79255 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt @@ -21,23 +21,23 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -read [0x20 0x08] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x05] # properties = none - [0x27] 66560 # maximum packet size = 66560 +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + 
[0x27] 66560 # maximum packet size = 66560 -read [0xe0 0x13] # DISCONNECT - [0x9d] # reason code = Use another server - [0x11] # properties - [0x1c 0x00 0x0e] "localhost:1884" # server reference +read [0xe0 0x1c] # DISCONNECT + [0x9d] # reason code = Use another server + [0x1a] # properties + [0x1c 0x00 0x17] "mqtt-2.example.com:1883" # server reference diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt index 58120e20f9..1f9a0576de 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt @@ -22,21 +22,21 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -write [0x20 0x08] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x05] # properties = none - [0x27] 66560 # maximum packet size = 66560 +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 -write [0xe0 0x13] # DISCONNECT - [0x9d] # reason code = Use another server - [0x11] # properties - [0x1c 0x00 0x0e] "localhost:1884" # server reference +write [0xe0 0x1c] # 
DISCONNECT + [0x9d] # reason code = Use another server + [0x1a] # properties + [0x1c 0x00 0x17] "mqtt-2.example.com:1883" # server reference diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt index caddb654bb..ecb87b2a9c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt @@ -21,16 +21,16 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -read [0x20 0x14] # CONNACK - [0x00] # flags = none - [0x9d] # reason code = Use another server - [0x11] # properties - [0x1c 0x00 0x0e] "localhost:1884" # server reference +read [0x20 0x1d] # CONNACK + [0x00] # flags = none + [0x9d] # reason code = Use another server + [0x1a] # properties + [0x1c 0x00 0x17] "mqtt-2.example.com:1883" # server reference diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt index 1a77fee573..804087d118 
100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt @@ -22,16 +22,16 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x06] "client" # client id +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id -write [0x20 0x14] # CONNACK - [0x00] # flags = none - [0x9d] # reason code = Use another server - [0x11] # properties - [0x1c 0x00 0x0e] "localhost:1884" # server reference +write [0x20 0x1d] # CONNACK + [0x00] # flags = none + [0x9d] # reason code = Use another server + [0x1a] # properties + [0x1c 0x00 0x17] "mqtt-2.example.com:1883" # server reference diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java index 43707aa657..c118e4530d 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java @@ -30,9 +30,10 @@ import org.kaazing.k3po.lang.el.BytesMatcher; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttPayloadFormat; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSessionSignalFW; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSessionSignalType; 
import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttSessionStateFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttWillMessageFW; -import io.aklivity.zilla.specs.binding.mqtt.internal.types.MqttWillSignalFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttDataExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttFlushExFW; @@ -64,7 +65,7 @@ public void shouldEncodeMqttSessionBeginExt() .flags("WILL", "CLEAN_START") .expiry(30) .clientId("client") - .serverRef("localhost:1883") + .serverRef("mqtt-1.example.com:1883") .build() .build(); @@ -73,7 +74,7 @@ public void shouldEncodeMqttSessionBeginExt() assertEquals(2, mqttBeginEx.kind()); assertEquals("client", mqttBeginEx.session().clientId().asString()); - assertEquals("localhost:1883", mqttBeginEx.session().serverRef().asString()); + assertEquals("mqtt-1.example.com:1883", mqttBeginEx.session().serverRef().asString()); assertEquals(30, mqttBeginEx.session().expiry()); assertEquals(6, mqttBeginEx.session().flags()); } @@ -286,7 +287,7 @@ public void shouldMatchSessionBeginExtension() throws Exception .flags("CLEAN_START") .expiry(10) .clientId("client") - .serverRef("localhost:1883") + .serverRef("mqtt-1.example.com:1883") .build() .build(); @@ -299,7 +300,7 @@ public void shouldMatchSessionBeginExtension() throws Exception .flags(2) .expiry(10) .clientId("client") - .serverRef("localhost:1883")) + .serverRef("mqtt-1.example.com:1883")) .build(); assertNotNull(matcher.match(byteBuf)); @@ -1174,13 +1175,13 @@ public void shouldEncodeMqttResetEx() { final byte[] array = MqttFunctions.resetEx() .typeId(0) - .serverRef("localhost:1883") + .serverRef("mqtt-1.example.com:1883") .build(); DirectBuffer buffer = new UnsafeBuffer(array); MqttResetExFW mqttResetEx = new MqttResetExFW().wrap(buffer, 0, buffer.capacity()); assertEquals(0, mqttResetEx.typeId()); - 
assertEquals("localhost:1883", mqttResetEx.serverRef().asString()); + assertEquals("mqtt-1.example.com:1883", mqttResetEx.serverRef().asString()); } @Test @@ -1275,45 +1276,94 @@ public void shouldEncodeWillMessageBytesPayload() @Test public void shouldEncodeWillSignal() { - final byte[] array = MqttFunctions.willSignal() + final byte[] array = MqttFunctions.sessionSignal() + .will() + .instanceId("zilla-1") + .clientId("client-1") + .delay(20) + .deliverAt(100000) + .lifetimeId("1") + .willId("2") + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(array); + MqttSessionSignalFW signal = new MqttSessionSignalFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals(MqttSessionSignalType.WILL.value(), signal.kind()); + assertEquals("client-1", signal.will().clientId().asString()); + assertEquals(20, signal.will().delay()); + assertEquals(100000, signal.will().deliverAt()); + assertEquals("1", signal.will().lifetimeId().asString()); + assertEquals("2", signal.will().willId().asString()); + assertEquals("zilla-1", signal.will().instanceId().asString()); + } + + @Test + public void shouldEncodeWillSignalUnknownDeliverAt() + { + final byte[] array = MqttFunctions.sessionSignal() + .will() + .instanceId("zilla-1") .clientId("client-1") .delay(20) - .deliverAt(100000) .lifetimeId("1") .willId("2") + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(array); + MqttSessionSignalFW signal = new MqttSessionSignalFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals(MqttSessionSignalType.WILL.value(), signal.kind()); + assertEquals("client-1", signal.will().clientId().asString()); + assertEquals(20, signal.will().delay()); + assertEquals(-1, signal.will().deliverAt()); + assertEquals("1", signal.will().lifetimeId().asString()); + assertEquals("2", signal.will().willId().asString()); + assertEquals("zilla-1", signal.will().instanceId().asString()); + } + + @Test + public void shouldEncodeExpirySignal() + { + final byte[] array = 
MqttFunctions.sessionSignal() + .expiry() .instanceId("zilla-1") + .clientId("client-1") + .delay(20) + .expireAt(100000) + .build() .build(); DirectBuffer buffer = new UnsafeBuffer(array); - MqttWillSignalFW willSignal = new MqttWillSignalFW().wrap(buffer, 0, buffer.capacity()); - - assertEquals("client-1", willSignal.clientId().asString()); - assertEquals(20, willSignal.delay()); - assertEquals(100000, willSignal.deliverAt()); - assertEquals("1", willSignal.lifetimeId().asString()); - assertEquals("2", willSignal.willId().asString()); - assertEquals("zilla-1", willSignal.instanceId().asString()); + MqttSessionSignalFW signal = new MqttSessionSignalFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals(MqttSessionSignalType.EXPIRY.value(), signal.kind()); + assertEquals("client-1", signal.expiry().clientId().asString()); + assertEquals(20, signal.expiry().delay()); + assertEquals(100000, signal.expiry().expireAt()); + assertEquals("zilla-1", signal.expiry().instanceId().asString()); } @Test - public void shouldEncodeWillSignalUnknownDeliverAt() + public void shouldEncodeExpirySignalUnknownExpiry() { - final byte[] array = MqttFunctions.willSignal() - .clientId("client-1") - .delay(20) - .lifetimeId("1") - .willId("2") - .instanceId("zilla-1") + final byte[] array = MqttFunctions.sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(20) + .build() .build(); DirectBuffer buffer = new UnsafeBuffer(array); - MqttWillSignalFW willSignal = new MqttWillSignalFW().wrap(buffer, 0, buffer.capacity()); - - assertEquals("client-1", willSignal.clientId().asString()); - assertEquals(20, willSignal.delay()); - assertEquals(-1, willSignal.deliverAt()); - assertEquals("1", willSignal.lifetimeId().asString()); - assertEquals("2", willSignal.willId().asString()); - assertEquals("zilla-1", willSignal.instanceId().asString()); + MqttSessionSignalFW signal = new MqttSessionSignalFW().wrap(buffer, 0, buffer.capacity()); + + 
assertEquals(MqttSessionSignalType.EXPIRY.value(), signal.kind()); + assertEquals("client-1", signal.expiry().clientId().asString()); + assertEquals("zilla-1", signal.expiry().instanceId().asString()); + assertEquals(20, signal.expiry().delay()); + assertEquals(-1, signal.expiry().expireAt()); } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java index 196930aaf6..0b920583bc 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java @@ -46,6 +46,15 @@ public void shouldConnectWithSessionExpiry() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/session.connect.override.session.expiry/client", + "${app}/session.connect.override.session.expiry/server"}) + public void shouldConnectServerOverridesSessionExpiry() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${app}/session.exists.clean.start/client", diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java index ff46963650..31b78f8578 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java @@ -427,4 +427,13 @@ public void shouldDisconnectAfterSubscribeAndPublish() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${net}/disconnect.invalid.session.expiry/client", + "${net}/disconnect.invalid.session.expiry/server"}) + 
public void shouldRejectInvalidSessionExpiryOnDisconnect() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java index a3dd248b41..e80e349914 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java @@ -46,6 +46,15 @@ public void shouldConnectWithSessionExpiry() throws Exception k3po.finish(); } + @Test + @Specification({ + "${net}/session.connect.override.session.expiry/client", + "${net}/session.connect.override.session.expiry/server"}) + public void shouldConnectServerOverridesSessionExpiry() throws Exception + { + k3po.finish(); + } + // [MQTT-3.1.2-5], [MQTT-3.1.2-23] @Test @Specification({ diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java index c523fa435d..1e4a84e2d2 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java @@ -24,7 +24,6 @@ public class MqttConfiguration extends Configuration private static final ConfigurationDef MQTT_CONFIG; public static final LongPropertyDef CONNECT_TIMEOUT; public static final LongPropertyDef PUBLISH_TIMEOUT; - public static final IntPropertyDef SESSION_EXPIRY_INTERVAL; public static final ShortPropertyDef KEEP_ALIVE_MINIMUM; public static final ShortPropertyDef KEEP_ALIVE_MAXIMUM; public static final BytePropertyDef MAXIMUM_QOS; @@ -44,7 +43,6 @@ public class 
MqttConfiguration extends Configuration final ConfigurationDef config = new ConfigurationDef("zilla.binding.mqtt"); PUBLISH_TIMEOUT = config.property("publish.timeout", TimeUnit.SECONDS.toSeconds(30)); CONNECT_TIMEOUT = config.property("connect.timeout", TimeUnit.SECONDS.toSeconds(3)); - SESSION_EXPIRY_INTERVAL = config.property("session.expiry.interval", Integer.MAX_VALUE); //TODO: better default values? KEEP_ALIVE_MINIMUM = config.property("keep.alive.minimum", (short) 10); KEEP_ALIVE_MAXIMUM = config.property("keep.alive.maximum", (short) 1000); @@ -83,11 +81,6 @@ public boolean retainAvailable() return RETAIN_AVAILABLE.get(this); } - public int sessionExpiryInterval() - { - return SESSION_EXPIRY_INTERVAL.get(this); - } - public short keepAliveMinimum() { return KEEP_ALIVE_MINIMUM.get(this); diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 4bbe869c59..1cc23a6c69 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -150,6 +150,7 @@ import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttFlushExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttPublishDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttResetExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttSessionBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttSessionDataKind; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.ResetFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.SignalFW; @@ -238,6 +239,7 @@ public final class MqttServerFactory 
implements MqttStreamFactory private final MqttPublishDataExFW mqttPublishDataExRO = new MqttPublishDataExFW(); private final MqttDataExFW mqttSubscribeDataExRO = new MqttDataExFW(); private final MqttResetExFW mqttResetExRO = new MqttResetExFW(); + private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); private final MqttBeginExFW.Builder mqttPublishBeginExRW = new MqttBeginExFW.Builder(); private final MqttBeginExFW.Builder mqttSubscribeBeginExRW = new MqttBeginExFW.Builder(); @@ -363,7 +365,6 @@ public final class MqttServerFactory implements MqttStreamFactory private final long connectTimeoutMillis; private final int encodeBudgetMax; - private final int sessionExpiryIntervalLimit; private final short keepAliveMinimum; private final short keepAliveMaximum; private final byte maximumQos; @@ -412,7 +413,6 @@ public MqttServerFactory( this.mqttTypeId = context.supplyTypeId(MqttBinding.NAME); this.publishTimeoutMillis = SECONDS.toMillis(config.publishTimeout()); this.connectTimeoutMillis = SECONDS.toMillis(config.connectTimeout()); - this.sessionExpiryIntervalLimit = config.sessionExpiryInterval(); this.keepAliveMinimum = config.keepAliveMinimum(); this.keepAliveMaximum = config.keepAliveMaximum(); this.maximumQos = config.maximumQos(); @@ -1259,9 +1259,10 @@ private final class MqttServer private boolean connected; private short topicAliasMaximum = 0; - private int sessionExpiryInterval = 0; + private int connectSessionExpiry = 0; + private int sessionExpiry; private boolean assignedClientId = false; - private int propertyMask = 0; + private int decodablePropertyMask = 0; private int state; private long sessionId; @@ -1571,36 +1572,36 @@ private byte decodeConnectProperties( switch (mqttProperty.kind()) { case KIND_TOPIC_ALIAS_MAXIMUM: - if (isSetTopicAliasMaximum(propertyMask)) + if (isSetTopicAliasMaximum(decodablePropertyMask)) { topicAliasMaximum = 0; reasonCode = PROTOCOL_ERROR; break decode; } - this.propertyMask |= CONNECT_TOPIC_ALIAS_MAXIMUM_MASK; 
+ this.decodablePropertyMask |= CONNECT_TOPIC_ALIAS_MAXIMUM_MASK; final short topicAliasMaximum = (short) (mqttProperty.topicAliasMaximum() & 0xFFFF); this.topicAliasMaximum = (short) Math.min(topicAliasMaximum, topicAliasMaximumLimit); break; case KIND_SESSION_EXPIRY: - if (isSetSessionExpiryInterval(propertyMask)) + if (isSetSessionExpiryInterval(decodablePropertyMask)) { - sessionExpiryInterval = 0; + connectSessionExpiry = 0; reasonCode = PROTOCOL_ERROR; break decode; } - this.propertyMask |= CONNECT_SESSION_EXPIRY_INTERVAL_MASK; - final int sessionExpiryInterval = (int) mqttProperty.sessionExpiry(); - this.sessionExpiryInterval = Math.min(sessionExpiryInterval, sessionExpiryIntervalLimit); + this.decodablePropertyMask |= CONNECT_SESSION_EXPIRY_INTERVAL_MASK; + this.connectSessionExpiry = (int) mqttProperty.sessionExpiry(); + this.sessionExpiry = connectSessionExpiry; break; case KIND_RECEIVE_MAXIMUM: case KIND_MAXIMUM_PACKET_SIZE: final int maxConnectPacketSize = (int) mqttProperty.maximumPacketSize(); - if (maxConnectPacketSize == 0 || isSetMaximumPacketSize(propertyMask)) + if (maxConnectPacketSize == 0 || isSetMaximumPacketSize(decodablePropertyMask)) { reasonCode = PROTOCOL_ERROR; break decode; } - this.propertyMask |= CONNECT_TOPIC_ALIAS_MAXIMUM_MASK; + this.decodablePropertyMask |= CONNECT_TOPIC_ALIAS_MAXIMUM_MASK; //TODO: remove this once we will support large messages maximumPacketSize = Math.min(maxConnectPacketSize, maximumPacketSize); break; @@ -1833,9 +1834,9 @@ private void resolveSession( { final MqttBeginExFW.Builder builder = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) .typeId(mqttTypeId) - .session(sessionBuilder -> sessionBuilder + .session(s -> s .flags(flags) - .expiry(sessionExpiryInterval) + .expiry(sessionExpiry) .clientId(clientId) .serverRef(serverRef) ); @@ -2244,22 +2245,74 @@ private void onDecodeDisconnect( long authorization, MqttDisconnectFW disconnect) { - state = MqttState.closingInitial(state); 
- if (session) + byte reasonCode = decodeDisconnectProperties(disconnect.properties()); + + if (reasonCode != SUCCESS) { - if (disconnect.reasonCode() == DISCONNECT_WITH_WILL_MESSAGE) - { - sessionStream.doSessionAbort(traceId); - } - else + onDecodeError(traceId, authorization, reasonCode); + decoder = decodeIgnoreAll; + } + else + { + if (session) { - sessionStream.doSessionAppEnd(traceId, EMPTY_OCTETS); + if (disconnect.reasonCode() == DISCONNECT_WITH_WILL_MESSAGE) + { + sessionStream.doSessionAbort(traceId); + } + else + { + sessionStream.doSessionAppEnd(traceId, EMPTY_OCTETS); + } } } + + state = MqttState.closingInitial(state); closeStreams(traceId, authorization); doNetworkEnd(traceId, authorization); } + private byte decodeDisconnectProperties( + MqttPropertiesFW properties) + { + byte reasonCode = SUCCESS; + + final OctetsFW propertiesValue = properties.value(); + final DirectBuffer decodeBuffer = propertiesValue.buffer(); + final int decodeOffset = propertiesValue.offset(); + final int decodeLimit = propertiesValue.limit(); + + decode: + for (int decodeProgress = decodeOffset; decodeProgress < decodeLimit; ) + { + final MqttPropertyFW mqttProperty = mqttPropertyRO.wrap(decodeBuffer, decodeProgress, decodeLimit); + switch (mqttProperty.kind()) + { + case KIND_SESSION_EXPIRY: + if (isSetSessionExpiryInterval(decodablePropertyMask)) + { + reasonCode = PROTOCOL_ERROR; + break decode; + } + this.decodablePropertyMask |= CONNECT_SESSION_EXPIRY_INTERVAL_MASK; + final int sessionExpiryInterval = (int) mqttProperty.sessionExpiry(); + if (sessionExpiryInterval > 0 && this.sessionExpiry == 0) + { + reasonCode = PROTOCOL_ERROR; + break decode; + } + break; + default: + reasonCode = MALFORMED_PACKET; + break decode; + } + + decodeProgress = mqttProperty.limit(); + } + + return reasonCode; + } + private void onDecodeError( long traceId, long authorization, @@ -2556,10 +2609,10 @@ private void doEncodeConnack( .build(); propertiesSize = mqttProperty.limit(); - if 
(sessionExpiryInterval > sessionExpiryIntervalLimit) + if (connectSessionExpiry != sessionExpiry) { mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity()) - .sessionExpiry(sessionExpiryIntervalLimit) + .sessionExpiry(sessionExpiry) .build(); propertiesSize = mqttProperty.limit(); } @@ -3203,7 +3256,17 @@ private void onSessionBegin( state = MqttState.openReply(state); final long traceId = begin.traceId(); - final long authorization = begin.authorization(); + + final OctetsFW extension = begin.extension(); + if (extension.sizeof() > 0) + { + final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); + + assert mqttBeginEx.kind() == MqttBeginExFW.KIND_SESSION; + final MqttSessionBeginExFW mqttSessionBeginEx = mqttBeginEx.session(); + + sessionExpiry = mqttSessionBeginEx.expiry(); + } doSessionWindow(traceId, encodeSlotOffset, encodeBudgetMax); } diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java index 566864b6c7..3a3859e683 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java @@ -26,7 +26,6 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SERVER_REFERENCE; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SESSIONS_AVAILABLE; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SESSION_EXPIRY_GRACE_PERIOD; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SESSION_EXPIRY_INTERVAL; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SHARED_SUBSCRIPTION_AVAILABLE; import static 
io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SUBSCRIPTION_IDENTIFIERS_AVAILABLE; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.TOPIC_ALIAS_MAXIMUM; @@ -39,7 +38,6 @@ public class MqttConfigurationTest { public static final String PUBLISH_TIMEOUT_NAME = "zilla.binding.mqtt.publish.timeout"; public static final String CONNECT_TIMEOUT_NAME = "zilla.binding.mqtt.connect.timeout"; - public static final String SESSION_EXPIRY_INTERVAL_NAME = "zilla.binding.mqtt.session.expiry.interval"; public static final String KEEP_ALIVE_MINIMUM_NAME = "zilla.binding.mqtt.keep.alive.minimum"; public static final String KEEP_ALIVE_MAXIMUM_NAME = "zilla.binding.mqtt.keep.alive.maximum"; public static final String MAXIMUM_QOS_NAME = "zilla.binding.mqtt.maximum.qos"; @@ -59,7 +57,6 @@ public void shouldVerifyConstants() throws Exception { assertEquals(PUBLISH_TIMEOUT.name(), PUBLISH_TIMEOUT_NAME); assertEquals(CONNECT_TIMEOUT.name(), CONNECT_TIMEOUT_NAME); - assertEquals(SESSION_EXPIRY_INTERVAL.name(), SESSION_EXPIRY_INTERVAL_NAME); assertEquals(KEEP_ALIVE_MINIMUM.name(), KEEP_ALIVE_MINIMUM_NAME); assertEquals(KEEP_ALIVE_MAXIMUM.name(), KEEP_ALIVE_MAXIMUM_NAME); assertEquals(MAXIMUM_QOS.name(), MAXIMUM_QOS_NAME); diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java index 95fd384785..baf4cdd51c 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java @@ -21,9 +21,7 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.MAXIMUM_QOS_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.RETAIN_AVAILABLE_NAME; import 
static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_EXPIRY_INTERVAL_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.WILDCARD_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static io.aklivity.zilla.runtime.engine.test.EngineRule.ENGINE_BUFFER_SLOT_CAPACITY_NAME; import static java.util.concurrent.TimeUnit.SECONDS; @@ -69,10 +67,8 @@ public class ConnectionIT @Specification({ "${net}/connect.successful/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldConnect() throws Exception { k3po.finish(); @@ -84,10 +80,8 @@ public void shouldConnect() throws Exception "${net}/connect.username.authentication.successful/client", "${app}/connect.authorize.publish.one.message/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldAuthenticateUsernameAndConnect() throws Exception { k3po.finish(); @@ -98,10 +92,8 @@ public void shouldAuthenticateUsernameAndConnect() throws Exception @Specification({ "${net}/connect.username.authentication.failed/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = 
WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFailUsernameAuthentication() throws Exception { k3po.finish(); @@ -112,10 +104,8 @@ public void shouldFailUsernameAuthentication() throws Exception @Specification({ "${net}/connect.password.authentication.successful/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldAuthenticatePasswordAndConnect() throws Exception { k3po.finish(); @@ -126,10 +116,8 @@ public void shouldAuthenticatePasswordAndConnect() throws Exception @Specification({ "${net}/connect.password.authentication.failed/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFailPasswordAuthentication() throws Exception { k3po.finish(); @@ -140,10 +128,8 @@ public void shouldFailPasswordAuthentication() throws Exception @Specification({ "${net}/connect.server.assigned.client.id/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldConnectWithServerAssignedClientId() throws Exception { k3po.finish(); @@ 
-154,10 +140,8 @@ public void shouldConnectWithServerAssignedClientId() throws Exception @Specification({ "${net}/connect.reject.missing.client.id/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectMissingClientId() throws Exception { k3po.finish(); @@ -168,10 +152,8 @@ public void shouldRejectMissingClientId() throws Exception @Specification({ "${net}/disconnect/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldConnectThenDisconnect() throws Exception { k3po.finish(); @@ -183,10 +165,8 @@ public void shouldConnectThenDisconnect() throws Exception "${net}/disconnect.after.subscribe.and.publish/client", "${app}/disconnect.after.subscribe.and.publish/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldDisconnectAfterSubscribeAndPublish() throws Exception { k3po.finish(); @@ -198,10 +178,8 @@ public void shouldDisconnectAfterSubscribeAndPublish() throws Exception @Specification({ "${net}/connect.invalid.protocol.version/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = 
SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectInvalidProtocolVersion() throws Exception { k3po.finish(); @@ -212,10 +190,8 @@ public void shouldRejectInvalidProtocolVersion() throws Exception @Specification({ "${net}/connect.invalid.flags/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectMalformedConnectPacket() throws Exception { k3po.finish(); @@ -226,10 +202,8 @@ public void shouldRejectMalformedConnectPacket() throws Exception @Specification({ "${net}/connect.invalid.authentication.method/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectBadAuthenticationMethod() throws Exception { k3po.finish(); @@ -240,10 +214,8 @@ public void shouldRejectBadAuthenticationMethod() throws Exception @Specification({ "${net}/disconnect.reject.invalid.fixed.header.flags/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectMalformedDisconnectPacket() throws Exception { k3po.finish(); @@ -255,10 +227,8 @@ public void shouldRejectMalformedDisconnectPacket() throws 
Exception @Specification({ "${net}/connect.reject.second.connect/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectSecondConnectPacket() throws Exception { k3po.finish(); @@ -269,10 +239,8 @@ public void shouldRejectSecondConnectPacket() throws Exception @Specification({ "${net}/connect.successful.fragmented/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldConnectFragmented() throws Exception { k3po.finish(); @@ -284,10 +252,8 @@ public void shouldConnectFragmented() throws Exception @Specification({ "${net}/connect.reject.other.packet.before.connect/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectOtherPacketBeforeConnect() throws Exception { k3po.finish(); @@ -298,10 +264,8 @@ public void shouldRejectOtherPacketBeforeConnect() throws Exception @Specification({ "${net}/connect.reject.topic.alias.maximum.repeated/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = 
SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectConnectWhenTopicAliasMaximumRepeated() throws Exception { k3po.finish(); @@ -313,10 +277,8 @@ public void shouldRejectConnectWhenTopicAliasMaximumRepeated() throws Exception "${net}/client.sent.close/client", "${app}/client.sent.abort/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldReceiveClientSentClose() throws Exception { k3po.finish(); @@ -328,10 +290,8 @@ public void shouldReceiveClientSentClose() throws Exception "${net}/client.sent.abort/client", "${app}/client.sent.abort/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldReceiveClientSentAbort() throws Exception { k3po.finish(); @@ -343,10 +303,8 @@ public void shouldReceiveClientSentAbort() throws Exception "${net}/client.sent.reset/client", "${app}/client.sent.abort/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldReceiveClientSentReset() throws Exception { k3po.finish(); @@ -357,10 +315,8 @@ public void shouldReceiveClientSentReset() throws Exception @Specification({ "${net}/disconnect.after.keep.alive.timeout/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = 
"false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") @Configure(name = KEEP_ALIVE_MINIMUM_NAME, value = "1") public void shouldDisconnectClientAfterKeepAliveTimeout() throws Exception { @@ -372,7 +328,6 @@ public void shouldDisconnectClientAfterKeepAliveTimeout() throws Exception @Specification({ "${net}/connect.timeout.before.connect/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") @Configure(name = CONNECT_TIMEOUT_NAME, value = "1") public void shouldTimeoutBeforeConnect() throws Exception { @@ -383,10 +338,8 @@ public void shouldTimeoutBeforeConnect() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.maximum.qos.0/client"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SESSION_AVAILABLE_NAME, value = "false") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldConnectWithMaximumQos0() throws Exception { k3po.finish(); @@ -396,12 +349,10 @@ public void shouldConnectWithMaximumQos0() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.retain.not.supported/client"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SESSION_AVAILABLE_NAME, value = "false") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = RETAIN_AVAILABLE_NAME, value = "false") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldConnectWithRetainNotSupported() throws Exception { k3po.finish(); @@ -411,12 +362,10 @@ public void 
shouldConnectWithRetainNotSupported() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.reject.will.retain.not.supported/client"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SESSION_AVAILABLE_NAME, value = "false") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = RETAIN_AVAILABLE_NAME, value = "false") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectConnectWillRetainNotSupported() throws Exception { k3po.finish(); @@ -427,10 +376,8 @@ public void shouldRejectConnectWillRetainNotSupported() throws Exception @Specification({ "${net}/connect.reject.password.flag.no.password/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectConnectWithPasswordFlagSetNoPassword() throws Exception { k3po.finish(); @@ -442,10 +389,8 @@ public void shouldRejectConnectWithPasswordFlagSetNoPassword() throws Exception @Specification({ "${net}/connect.reject.password.no.password.flag/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectConnectWithPasswordNoPasswordFlag() throws Exception { k3po.finish(); @@ -456,10 +401,8 @@ public void shouldRejectConnectWithPasswordNoPasswordFlag() throws Exception @Specification({ "${net}/connect.reject.username.flag.only/client"}) @Configure(name = 
SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectConnectWithUsernameFlagNoUsername() throws Exception { k3po.finish(); @@ -471,10 +414,8 @@ public void shouldRejectConnectWithUsernameFlagNoUsername() throws Exception @Specification({ "${net}/connect.reject.username.flag.missing/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectConnectWithUsernameNoUsernameFlag() throws Exception { k3po.finish(); @@ -485,10 +426,8 @@ public void shouldRejectConnectWithUsernameNoUsernameFlag() throws Exception @Specification({ "${net}/connect.reject.will.payload.missing/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectConnectWillPayloadMissing() throws Exception { k3po.finish(); @@ -499,10 +438,8 @@ public void shouldRejectConnectWillPayloadMissing() throws Exception @Specification({ "${net}/connect.reject.will.properties.missing/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = 
SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectConnectWillPropertiesMissing() throws Exception { k3po.finish(); @@ -513,10 +450,8 @@ public void shouldRejectConnectWillPropertiesMissing() throws Exception @Specification({ "${net}/connect.reject.will.topic.missing/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectConnectWillTopicMissing() throws Exception { k3po.finish(); @@ -527,10 +462,8 @@ public void shouldRejectConnectWillTopicMissing() throws Exception @Specification({ "${net}/connect.will.invalid.will.qos/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectInvalidWillQos() throws Exception { k3po.finish(); @@ -540,10 +473,8 @@ public void shouldRejectInvalidWillQos() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.will.reject.will.qos.1.without.will.flag/client"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectWillQos1WithoutWillFlag() throws Exception { k3po.finish(); @@ -554,10 +485,8 @@ public void shouldRejectWillQos1WithoutWillFlag() throws Exception @Specification({ "${net}/connect.will.reject.will.qos.2.without.will.flag/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") 
- @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectWillQos2WithoutWillFlag() throws Exception { k3po.finish(); @@ -568,10 +497,8 @@ public void shouldRejectWillQos2WithoutWillFlag() throws Exception @Specification({ "${net}/connect.will.reject.will.retain.without.will.flag/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectWillRetainWithoutWillFlag() throws Exception { k3po.finish(); @@ -583,10 +510,8 @@ public void shouldRejectWillRetainWithoutWillFlag() throws Exception "${net}/connect.max.packet.size.exceeded/client", "${app}/connect.max.packet.size.exceeded/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldIgnorePublishPacketBiggerThanMaxPacketSize() throws Exception { k3po.finish(); @@ -597,10 +522,8 @@ public void shouldIgnorePublishPacketBiggerThanMaxPacketSize() throws Exception @Specification({ "${net}/connect.server.defined.keep.alive/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = 
"0") @Configure(name = KEEP_ALIVE_MINIMUM_NAME, value = "10") public void shouldConnectWithServerDefinedKeepAlive() throws Exception { @@ -616,10 +539,8 @@ public void shouldConnectWithServerDefinedKeepAlive() throws Exception "${net}/connect.subscribe.unfragmented/client", "${app}/subscribe.topic.filter.single.exact/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") @Configure(name = KEEP_ALIVE_MINIMUM_NAME, value = "10") public void shouldConnectAndSubscribeUnfragmented() throws Exception { @@ -631,13 +552,23 @@ public void shouldConnectAndSubscribeUnfragmented() throws Exception @Specification({ "${net}/connect.reject.packet.too.large/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") @Configure(name = ENGINE_BUFFER_SLOT_CAPACITY_NAME, value = "8192") public void shouldRejectPacketTooLarge() throws Exception { k3po.finish(); } + + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/disconnect.invalid.session.expiry/client"}) + @Configure(name = SESSION_AVAILABLE_NAME, value = "false") + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + public void shouldRejectInvalidSessionExpiryOnDisconnect() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PingIT.java 
b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PingIT.java index bb76bea42c..f58ff08a3e 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PingIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PingIT.java @@ -19,9 +19,7 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.KEEP_ALIVE_MINIMUM_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.MAXIMUM_QOS_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_EXPIRY_INTERVAL_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.WILDCARD_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -65,10 +63,8 @@ public class PingIT @Specification({ "${net}/ping/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldConnectThenPingRequestResponse() throws Exception { k3po.finish(); @@ -80,10 +76,8 @@ public void shouldConnectThenPingRequestResponse() throws Exception "${net}/ping.keep.alive/client", "${app}/subscribe.topic.filter.single.exact/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = 
WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") @Configure(name = KEEP_ALIVE_MINIMUM_NAME, value = "1") public void shouldPingAtKeepAliveInterval() throws Exception { diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PublishIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PublishIT.java index 078a49afc9..78a377f0d7 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PublishIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PublishIT.java @@ -20,10 +20,8 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.PUBLISH_TIMEOUT_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.RETAIN_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_EXPIRY_INTERVAL_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.TOPIC_ALIAS_MAXIMUM_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.WILDCARD_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static io.aklivity.zilla.runtime.engine.test.EngineRule.ENGINE_BUFFER_SLOT_CAPACITY_NAME; import static java.util.concurrent.TimeUnit.SECONDS; @@ -70,10 +68,8 @@ public class PublishIT "${net}/publish.one.message/client", 
"${app}/publish.one.message/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishOneMessage() throws Exception { k3po.finish(); @@ -85,10 +81,8 @@ public void shouldPublishOneMessage() throws Exception "${net}/publish.retained/client", "${app}/publish.retained/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishRetainedMessage() throws Exception { k3po.finish(); @@ -100,11 +94,9 @@ public void shouldPublishRetainedMessage() throws Exception "${net}/publish.message.with.topic.alias/client", "${app}/publish.message.with.topic.alias/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishMessageWithTopicAlias() throws Exception { k3po.finish(); @@ -116,10 +108,8 @@ public void shouldPublishMessageWithTopicAlias() throws Exception "${net}/publish.multiple.messages/client", "${app}/publish.multiple.messages/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = 
MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishMultipleMessages() throws Exception { k3po.finish(); @@ -131,10 +121,8 @@ public void shouldPublishMultipleMessages() throws Exception "${net}/publish.multiple.messages.unfragmented/client", "${app}/publish.multiple.messages/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishMultipleMessagesUnfragmented() throws Exception { k3po.finish(); @@ -146,10 +134,8 @@ public void shouldPublishMultipleMessagesUnfragmented() throws Exception "${net}/publish.one.message.subscribe.unfragmented/client", "${app}/publish.one.message.subscribe.unfragmented/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishOneMessageSubscribeUnfragmented() throws Exception { k3po.finish(); @@ -161,10 +147,8 @@ public void shouldPublishOneMessageSubscribeUnfragmented() throws Exception "${net}/publish.multiple.messages.with.delay/client", "${app}/publish.multiple.messages/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") @Configure(name = PUBLISH_TIMEOUT_NAME, value = "5") public void shouldPublishMultipleMessagesWithDelay() 
throws Exception { @@ -181,11 +165,9 @@ public void shouldPublishMultipleMessagesWithDelay() throws Exception "${net}/publish.messages.with.topic.alias.distinct/client", "${app}/publish.messages.with.topic.alias.distinct/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishMessagesWithTopicAliasDistinct() throws Exception { k3po.finish(); @@ -197,11 +179,9 @@ public void shouldPublishMessagesWithTopicAliasDistinct() throws Exception "${net}/publish.messages.with.topic.alias.repeated/client", "${app}/publish.messages.with.topic.alias.repeated/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishMessagesWithTopicAliasRepeated() throws Exception { k3po.finish(); @@ -213,11 +193,9 @@ public void shouldPublishMessagesWithTopicAliasRepeated() throws Exception "${net}/publish.messages.with.topic.alias.replaced/client", "${app}/publish.messages.with.topic.alias.replaced/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "1") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void 
shouldPublishMessagesWithTopicAliasReplaced() throws Exception { k3po.finish(); @@ -229,11 +207,9 @@ public void shouldPublishMessagesWithTopicAliasReplaced() throws Exception "${net}/publish.messages.with.topic.alias.invalid.scope/client", "${app}/publish.messages.with.topic.alias.invalid.scope/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "1") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldSendMessagesWithTopicAliasInvalidScope() throws Exception { k3po.finish(); @@ -244,10 +220,8 @@ public void shouldSendMessagesWithTopicAliasInvalidScope() throws Exception @Specification({ "${net}/publish.topic.not.routed/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectTopicNotRouted() throws Exception { k3po.finish(); @@ -258,10 +232,8 @@ public void shouldRejectTopicNotRouted() throws Exception @Specification({ "${net}/publish.reject.topic.alias.exceeds.maximum/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectPublishWhenTopicAliasExceedsMaximum() throws Exception { k3po.finish(); @@ -272,11 +244,9 @@ public void shouldRejectPublishWhenTopicAliasExceedsMaximum() throws Exception 
@Specification({ "${net}/publish.reject.topic.alias.repeated/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectPublishWithMultipleTopicAliases() throws Exception { k3po.finish(); @@ -287,11 +257,9 @@ public void shouldRejectPublishWithMultipleTopicAliases() throws Exception @Specification({ "${net}/publish.reject.client.sent.subscription.id/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectPublishClientSentSubscriptionId() throws Exception { k3po.finish(); @@ -302,11 +270,9 @@ public void shouldRejectPublishClientSentSubscriptionId() throws Exception @Specification({ "${net}/publish.reject.invalid.payload.format/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectPublishInvalidPayloadFormat() throws Exception { k3po.finish(); @@ -317,11 +283,9 @@ public void shouldRejectPublishInvalidPayloadFormat() throws Exception @Specification({ "${net}/publish.reject.qos1.not.supported/client"}) @Configure(name = 
SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "0") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectPublisQos1NotSupported() throws Exception { k3po.finish(); @@ -332,11 +296,9 @@ public void shouldRejectPublisQos1NotSupported() throws Exception @Specification({ "${net}/publish.reject.qos2.not.supported/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "0") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectPublisQos2NotSupported() throws Exception { k3po.finish(); @@ -348,11 +310,9 @@ public void shouldRejectPublisQos2NotSupported() throws Exception @Specification({ "${net}/publish.reject.qos0.with.packet.id/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectPublishQos0WithPacketId() throws Exception { k3po.finish(); @@ -364,11 +324,9 @@ public void shouldRejectPublishQos0WithPacketId() throws Exception @Specification({ "${net}/publish.reject.qos1.without.packet.id/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = 
SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectPublishQos1WithoutPacketId() throws Exception { k3po.finish(); @@ -380,11 +338,9 @@ public void shouldRejectPublishQos1WithoutPacketId() throws Exception @Specification({ "${net}/publish.reject.qos2.without.packet.id/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectPublishQos2WithoutPacketId() throws Exception { k3po.finish(); @@ -399,7 +355,6 @@ public void shouldRejectPublishQos2WithoutPacketId() throws Exception @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectPublishRetainNotSupported() throws Exception { k3po.finish(); @@ -411,10 +366,8 @@ public void shouldRejectPublishRetainNotSupported() throws Exception "${net}/publish.with.user.property/client", "${app}/publish.with.user.property/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishWithUserProperty() throws Exception { k3po.finish(); @@ -426,10 +379,8 @@ public void shouldPublishWithUserProperty() throws Exception 
"${net}/publish.with.user.properties.distinct/client", "${app}/publish.with.user.properties.distinct/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishWithDistinctUserProperties() throws Exception { k3po.finish(); @@ -441,10 +392,8 @@ public void shouldPublishWithDistinctUserProperties() throws Exception "${net}/publish.with.user.properties.repeated/client", "${app}/publish.with.user.properties.repeated/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishWithRepeatedUserProperties() throws Exception { k3po.finish(); @@ -456,10 +405,8 @@ public void shouldPublishWithRepeatedUserProperties() throws Exception "${net}/publish.empty.retained.message/client", "${app}/publish.empty.retained.message/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishEmptyRetainedMessage() throws Exception { k3po.finish(); @@ -471,10 +418,8 @@ public void shouldPublishEmptyRetainedMessage() throws Exception "${net}/publish.empty.message/client", "${app}/publish.empty.message/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = 
"true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldPublishEmptyMessage() throws Exception { k3po.finish(); @@ -485,10 +430,8 @@ public void shouldPublishEmptyMessage() throws Exception @Specification({ "${net}/publish.reject.packet.too.large/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") @Configure(name = ENGINE_BUFFER_SLOT_CAPACITY_NAME, value = "8192") public void shouldRejectPacketTooLarge() throws Exception { diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java index 9937e3a2b1..afa0169aa8 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java @@ -19,9 +19,7 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.KEEP_ALIVE_MINIMUM_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.MAXIMUM_QOS_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SERVER_REFERENCE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_EXPIRY_INTERVAL_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; -import static 
io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.WILDCARD_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -65,24 +63,32 @@ public class SessionIT @Specification({ "${net}/session.connect.with.session.expiry/client", "${app}/session.connect.with.session.expiry/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldConnectWithSessionExpiry() throws Exception { k3po.finish(); } + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/session.connect.override.session.expiry/client", + "${app}/session.connect.override.session.expiry/server"}) + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + public void shouldConnectServerOverridesSessionExpiry() throws Exception + { + k3po.finish(); + } + @Test @Configuration("server.yaml") @Specification({ "${net}/session.subscribe/client", "${app}/session.subscribe/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldSubscribeSaveSubscriptionsInSession() throws Exception { k3po.finish(); @@ -93,10 +99,8 @@ public void shouldSubscribeSaveSubscriptionsInSession() throws Exception @Specification({ "${net}/session.subscribe.multiple.isolated/client", "${app}/session.subscribe.multiple.isolated/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = 
SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldSubscribeMultipleSaveSubscriptionsInSession() throws Exception { k3po.finish(); @@ -107,10 +111,8 @@ public void shouldSubscribeMultipleSaveSubscriptionsInSession() throws Exception @Specification({ "${net}/session.subscribe.via.session.state/client", "${app}/session.subscribe.via.session.state/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldSubscribeViaSessionState() throws Exception { k3po.finish(); @@ -121,10 +123,8 @@ public void shouldSubscribeViaSessionState() throws Exception @Specification({ "${net}/session.unsubscribe.after.subscribe/client", "${app}/session.unsubscribe.after.subscribe/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldUnsubscribeSaveNewSessionState() throws Exception { k3po.finish(); @@ -135,10 +135,8 @@ public void shouldUnsubscribeSaveNewSessionState() throws Exception @Specification({ "${net}/session.unsubscribe.after.subscribe.deferred/client", "${app}/session.unsubscribe.after.subscribe.deferred/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldUnsubscribeAfterSubscribeDeferred() throws Exception { k3po.finish(); @@ -149,10 +147,8 @@ public void 
shouldUnsubscribeAfterSubscribeDeferred() throws Exception @Specification({ "${net}/session.subscribe/client", "${app}/session.unsubscribe.via.session.state/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldUnsubscribeViaSessionState() throws Exception { k3po.finish(); @@ -163,10 +159,8 @@ public void shouldUnsubscribeViaSessionState() throws Exception @Specification({ "${net}/session.will.message.retain/client", "${app}/session.will.message.retain/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldStoreWillMessageInSessionState() throws Exception { k3po.finish(); @@ -177,10 +171,8 @@ public void shouldStoreWillMessageInSessionState() throws Exception @Specification({ "${net}/session.connect.payload.fragmented/client", "${app}/session.will.message.retain/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldStoreWillMessageInSessionStatePayloadFragmented() throws Exception { k3po.finish(); @@ -192,10 +184,8 @@ public void shouldStoreWillMessageInSessionStatePayloadFragmented() throws Excep @Specification({ "${net}/session.will.message.normal.disconnect/client", "${app}/session.will.message.normal.disconnect/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = 
MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldCloseSessionNormalDisconnect() throws Exception { k3po.finish(); @@ -206,10 +196,8 @@ public void shouldCloseSessionNormalDisconnect() throws Exception @Specification({ "${net}/session.will.message.disconnect.with.will.message/client", "${app}/session.will.message.abort/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldCloseSessionDisconnectWithWill() throws Exception { k3po.finish(); @@ -220,10 +208,8 @@ public void shouldCloseSessionDisconnectWithWill() throws Exception @Specification({ "${net}/session.will.message.no.ping.within.keep.alive/client", "${app}/session.will.message.abort/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") @Configure(name = KEEP_ALIVE_MINIMUM_NAME, value = "1") public void shouldCloseSessionWithKeepAliveExpired() throws Exception { @@ -235,10 +221,8 @@ public void shouldCloseSessionWithKeepAliveExpired() throws Exception @Specification({ "${net}/session.exists.clean.start/client", "${app}/session.exists.clean.start/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldCloseExistingConnectionCleanStart() throws Exception { k3po.finish(); @@ -249,10 +233,8 @@ public void shouldCloseExistingConnectionCleanStart() throws Exception @Specification({ 
"${net}/session.abort.reconnect.non.clean.start/client", "${app}/session.abort.reconnect.non.clean.start/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldClientAbortAndReconnectWithNonCleanStart() throws Exception { k3po.finish(); @@ -263,10 +245,8 @@ public void shouldClientAbortAndReconnectWithNonCleanStart() throws Exception @Specification({ "${net}/session.client.takeover/client", "${app}/session.client.takeover/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") public void shouldClientTakeOverSession() throws Exception { k3po.finish(); @@ -277,11 +257,9 @@ public void shouldClientTakeOverSession() throws Exception @Specification({ "${net}/session.server.redirect.after.connack/client", "${app}/session.server.redirect.after.connack/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") - @Configure(name = SERVER_REFERENCE_NAME, value = "localhost:1883") + @Configure(name = SERVER_REFERENCE_NAME, value = "mqtt-1.example.com:1883") public void shouldRedirectAfterConnack() throws Exception { k3po.finish(); @@ -292,11 +270,9 @@ public void shouldRedirectAfterConnack() throws Exception @Specification({ "${net}/session.server.redirect.before.connack/client", "${app}/session.server.redirect.before.connack/server"}) - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = 
SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "10") - @Configure(name = SERVER_REFERENCE_NAME, value = "localhost:1883") + @Configure(name = SERVER_REFERENCE_NAME, value = "mqtt-1.example.com:1883") public void shouldRedirectBeforeConnack() throws Exception { k3po.finish(); diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SubscribeIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SubscribeIT.java index ecd016e1ed..1b27d63092 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SubscribeIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SubscribeIT.java @@ -19,7 +19,6 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.MAXIMUM_QOS_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.NO_LOCAL_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_EXPIRY_INTERVAL_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SUBSCRIPTION_IDENTIFIERS_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.WILDCARD_SUBSCRIPTION_AVAILABLE_NAME; @@ -67,10 +66,8 @@ public class SubscribeIT "${net}/subscribe.one.message/client", "${app}/subscribe.one.message/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = 
SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldReceiveOneMessage() throws Exception { k3po.finish(); @@ -82,10 +79,8 @@ public void shouldReceiveOneMessage() throws Exception "${net}/subscribe.one.message.receive.response.topic.and.correlation.data/client", "${app}/subscribe.one.message.receive.response.topic.and.correlation.data/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldReceiveCorrelationData() throws Exception { k3po.finish(); @@ -97,10 +92,8 @@ public void shouldReceiveCorrelationData() throws Exception "${net}/subscribe.one.message.user.properties.unaltered/client", "${app}/subscribe.one.message.user.properties.unaltered/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldReceiveOneMessageWithUserPropertiesUnaltered() throws Exception { k3po.finish(); @@ -111,10 +104,8 @@ public void shouldReceiveOneMessageWithUserPropertiesUnaltered() throws Exceptio @Specification({ "${net}/subscribe.one.message.with.invalid.subscription.id/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") 
public void shouldReceiveOneMessageWithInvalidSubscriptionId() throws Exception { k3po.finish(); @@ -126,10 +117,8 @@ public void shouldReceiveOneMessageWithInvalidSubscriptionId() throws Exception "${net}/subscribe.topic.filter.single.exact/client", "${app}/subscribe.topic.filter.single.exact/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFilterExact() throws Exception { k3po.finish(); @@ -141,10 +130,8 @@ public void shouldFilterExact() throws Exception "${net}/subscribe.topic.filter.multi.level.wildcard/client", "${app}/subscribe.topic.filter.multi.level.wildcard/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFilterMultiLevelWildcard() throws Exception { k3po.finish(); @@ -156,10 +143,8 @@ public void shouldFilterMultiLevelWildcard() throws Exception "${net}/subscribe.topic.filter.single.and.multi.level.wildcard/client", "${app}/subscribe.topic.filter.single.and.multi.level.wildcard/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFilterSingleAndMultiLevelWildcard() throws Exception { k3po.finish(); @@ -171,10 +156,8 @@ public void shouldFilterSingleAndMultiLevelWildcard() throws Exception 
"${net}/subscribe.topic.filter.single.level.wildcard/client", "${app}/subscribe.topic.filter.single.level.wildcard/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFilterSingleLevelWildcard() throws Exception { k3po.finish(); @@ -186,10 +169,8 @@ public void shouldFilterSingleLevelWildcard() throws Exception "${net}/subscribe.topic.filter.two.single.level.wildcard/client", "${app}/subscribe.topic.filter.two.single.level.wildcard/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFilterTwoSingleLevelWildcard() throws Exception { k3po.finish(); @@ -201,10 +182,8 @@ public void shouldFilterTwoSingleLevelWildcard() throws Exception "${net}/subscribe.topic.filters.aggregated.both.exact/client", "${app}/subscribe.topic.filters.aggregated.both.exact/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFilterAggregatedBothExact() throws Exception { k3po.finish(); @@ -216,10 +195,8 @@ public void shouldFilterAggregatedBothExact() throws Exception "${net}/subscribe.topic.filters.isolated.both.exact/client", "${app}/subscribe.topic.filters.isolated.both.exact/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value 
= "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFilterIsolatedBothExact() throws Exception { k3po.finish(); @@ -231,10 +208,8 @@ public void shouldFilterIsolatedBothExact() throws Exception "${net}/subscribe.topic.filters.isolated.both.wildcard/client", "${app}/subscribe.topic.filters.isolated.both.wildcard/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFilterIsolatedBothWildcard() throws Exception { k3po.finish(); @@ -246,10 +221,8 @@ public void shouldFilterIsolatedBothWildcard() throws Exception "${net}/subscribe.topic.filters.aggregated.exact.and.wildcard/client", "${app}/subscribe.topic.filters.aggregated.exact.and.wildcard/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFilterAggregatedExactAndWildcard() throws Exception { k3po.finish(); @@ -261,10 +234,8 @@ public void shouldFilterAggregatedExactAndWildcard() throws Exception "${net}/subscribe.topic.filters.disjoint.wildcards/client", "${app}/subscribe.topic.filters.disjoint.wildcards/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") 
@Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFilterDisjointWildcard() throws Exception { k3po.finish(); @@ -276,10 +247,8 @@ public void shouldFilterDisjointWildcard() throws Exception "${net}/subscribe.topic.filters.isolated.exact.and.wildcard/client", "${app}/subscribe.topic.filters.isolated.exact.and.wildcard/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFilterIsolatedExactAndWildcard() throws Exception { k3po.finish(); @@ -291,10 +260,8 @@ public void shouldFilterIsolatedExactAndWildcard() throws Exception "${net}/subscribe.topic.filters.overlapping.wildcards/client", "${app}/subscribe.topic.filters.overlapping.wildcards/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldFilterOverlappingWildcard() throws Exception { k3po.finish(); @@ -306,10 +273,8 @@ public void shouldFilterOverlappingWildcard() throws Exception "${net}/subscribe.get.retained.as.published/client", "${app}/subscribe.get.retained.as.published/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldReceiveRetainedAsPublished() throws Exception { 
k3po.finish(); @@ -321,10 +286,8 @@ public void shouldReceiveRetainedAsPublished() throws Exception "${net}/subscribe.qos0.publish.retained.no.replay/client", "${app}/subscribe.qos0.publish.retained.no.replay/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldNotReplayRetained() throws Exception { k3po.finish(); @@ -336,10 +299,8 @@ public void shouldNotReplayRetained() throws Exception "${net}/subscribe.qos0.replay.retained.no.packet.id/client", "${app}/subscribe.qos0.replay.retained.no.packet.id/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldReceiveAndReplayRetainedQos0() throws Exception { k3po.finish(); @@ -352,7 +313,6 @@ public void shouldReceiveAndReplayRetainedQos0() throws Exception @Configure(name = SESSION_AVAILABLE_NAME, value = "false") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") @Configure(name = NO_LOCAL_NAME, value = "false") public void shouldRejectNoLocal() throws Exception { @@ -365,10 +325,8 @@ public void shouldRejectNoLocal() throws Exception "${net}/subscribe.receive.message/client", "${app}/subscribe.receive.message/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") 
@Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldReceiveOneMessageAfterPublish() throws Exception { k3po.finish(); @@ -380,10 +338,8 @@ public void shouldReceiveOneMessageAfterPublish() throws Exception "${net}/subscribe.receive.message.overlapping.wildcard/client", "${app}/subscribe.receive.message.overlapping.wildcard/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldReceiveMessageOverlappingWildcard() throws Exception { k3po.finish(); @@ -395,10 +351,8 @@ public void shouldReceiveMessageOverlappingWildcard() throws Exception "${net}/subscribe.receive.message.wildcard/client", "${app}/subscribe.receive.message.wildcard/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldReceiveOneMessageWithPatternTopic() throws Exception { k3po.finish(); @@ -410,10 +364,8 @@ public void shouldReceiveOneMessageWithPatternTopic() throws Exception "${net}/subscribe.retain.as.published/client", "${app}/subscribe.retain.as.published/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldReceiveRetainAsPublished() throws Exception { k3po.finish(); @@ 
-425,10 +377,8 @@ public void shouldReceiveRetainAsPublished() throws Exception "${net}/subscribe.publish.no.local/client", "${app}/subscribe.publish.no.local/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldNotReceivePublishLocal() throws Exception { k3po.finish(); @@ -439,10 +389,8 @@ public void shouldNotReceivePublishLocal() throws Exception @Specification({ "${net}/subscribe.invalid.fixed.header.flags/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectMalformedPacket() throws Exception { k3po.finish(); @@ -453,10 +401,8 @@ public void shouldRejectMalformedPacket() throws Exception @Specification({ "${net}/subscribe.invalid.topic.filter/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectInvalidTopicFilter() throws Exception { k3po.finish(); @@ -470,7 +416,6 @@ public void shouldRejectInvalidTopicFilter() throws Exception @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "false") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void 
shouldRejectWildcardSubscriptionsNotSupported() throws Exception { k3po.finish(); @@ -481,11 +426,9 @@ public void shouldRejectWildcardSubscriptionsNotSupported() throws Exception @Specification({ "${net}/subscribe.reject.subscription.ids.not.supported/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SUBSCRIPTION_IDENTIFIERS_AVAILABLE_NAME, value = "false") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectSubscriptionIdentifiersNotSupported() throws Exception { k3po.finish(); @@ -496,10 +439,8 @@ public void shouldRejectSubscriptionIdentifiersNotSupported() throws Exception @Specification({ "${net}/subscribe.reject.shared.subscriptions.not.supported/client"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "false") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectSharedSubscriptionsNotSupported() throws Exception { k3po.finish(); diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/UnsubscribeIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/UnsubscribeIT.java index 12830e9772..4eb2ffbb0b 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/UnsubscribeIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/UnsubscribeIT.java @@ -18,9 +18,7 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; import static 
io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.MAXIMUM_QOS_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_EXPIRY_INTERVAL_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.WILDCARD_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -65,10 +63,8 @@ public class UnsubscribeIT "${net}/unsubscribe.after.subscribe/client", "${app}/unsubscribe.after.subscribe/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldAcknowledge() throws Exception { k3po.finish(); @@ -80,10 +76,8 @@ public void shouldAcknowledge() throws Exception "${net}/unsubscribe.topic.filter.single/client", "${app}/unsubscribe.topic.filter.single/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldAcknowledgeSingleTopicFilters() throws Exception { k3po.finish(); @@ -95,10 +89,8 @@ public void shouldAcknowledgeSingleTopicFilters() throws Exception "${net}/unsubscribe.publish.unfragmented/client", 
"${app}/unsubscribe.publish.unfragmented/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldAcknowledgeAndPublishUnfragmented() throws Exception { k3po.finish(); @@ -110,10 +102,8 @@ public void shouldAcknowledgeAndPublishUnfragmented() throws Exception "${net}/unsubscribe.aggregated.topic.filters.both.exact/client", "${app}/unsubscribe.aggregated.topic.filters.both.exact/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldAcknowledgeAggregatedTopicFiltersBothExact() throws Exception { k3po.finish(); @@ -125,10 +115,8 @@ public void shouldAcknowledgeAggregatedTopicFiltersBothExact() throws Exception "${net}/unsubscribe.no.matching.subscription/client", "${app}/subscribe.topic.filter.single.exact/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldAcknowledgeNoMatchingSubscription() throws Exception { k3po.finish(); @@ -140,10 +128,8 @@ public void shouldAcknowledgeNoMatchingSubscription() throws Exception "${net}/unsubscribe.reject.invalid.fixed.header.flags/client", "${app}/subscribe.topic.filter.single.exact/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = 
WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectMalformedPacket() throws Exception { k3po.finish(); @@ -155,10 +141,8 @@ public void shouldRejectMalformedPacket() throws Exception "${net}/unsubscribe.reject.missing.packet.id/client", "${app}/subscribe.topic.filter.single.exact/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectWithoutPacketId() throws Exception { k3po.finish(); @@ -170,10 +154,8 @@ public void shouldRejectWithoutPacketId() throws Exception "${net}/unsubscribe.reject.no.topic.filter/client", "${app}/subscribe.topic.filter.single.exact/server"}) @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = SESSION_EXPIRY_INTERVAL_NAME, value = "0") public void shouldRejectNoTopicFilter() throws Exception { k3po.finish(); From cb874f5cdadf52239d4f1760a44d65a1e949bbf6 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Tue, 5 Sep 2023 22:49:54 +0200 Subject: [PATCH 077/115] Adapt to consumer group changes (#394) * Merged consumer group support (#390) * Adapt to consumer group changes * Change write notify to read * Send init empty data to trigger syncgroup * Add new test --------- Co-authored-by: Akram Yakubov --- .../client.rpt | 8 +- .../server.rpt | 6 +- .../client.rpt | 22 +- .../server.rpt | 14 +- .../session.client.sent.reset/client.rpt | 10 +- 
.../session.client.sent.reset/server.rpt | 6 +- .../kafka/session.client.takeover/client.rpt | 31 +- .../kafka/session.client.takeover/server.rpt | 26 +- .../client.rpt | 9 +- .../server.rpt | 6 +- .../client.rpt | 15 +- .../server.rpt | 15 +- .../client.rpt | 15 +- .../server.rpt | 15 +- .../session.exists.clean.start/client.rpt | 32 +- .../session.exists.clean.start/server.rpt | 23 +- .../client.rpt | 7 +- .../server.rpt | 6 +- .../streams/kafka/session.redirect/client.rpt | 7 +- .../streams/kafka/session.redirect/server.rpt | 6 +- .../session.server.sent.reset/client.rpt | 11 +- .../session.server.sent.reset/server.rpt | 6 +- .../client.rpt | 9 +- .../server.rpt | 6 +- .../kafka/session.subscribe/client.rpt | 11 +- .../kafka/session.subscribe/server.rpt | 6 +- .../client.rpt | 10 +- .../server.rpt | 6 +- .../client.rpt | 10 +- .../server.rpt | 6 +- .../client.rpt | 24 +- .../server.rpt | 23 +- .../client.rpt | 24 +- .../server.rpt | 24 +- .../client.rpt | 24 +- .../server.rpt | 23 +- .../client.rpt | 24 +- .../server.rpt | 23 +- .../client.rpt | 28 +- .../server.rpt | 29 +- .../client.rpt | 24 +- .../server.rpt | 23 +- .../client.rpt | 79 +- .../server.rpt | 79 +- .../client.rpt | 23 +- .../server.rpt | 23 +- .../client.rpt | 36 +- .../server.rpt | 36 +- .../subscribe.filter.change.retain/client.rpt | 23 +- .../subscribe.filter.change.retain/server.rpt | 23 +- .../client.rpt | 5 +- .../server.rpt | 1 + .../client.rpt | 3 +- .../server.rpt | 1 + .../client.rpt | 3 +- .../server.rpt | 1 + .../client.rpt | 1 + .../server.rpt | 1 + .../internal/MqttKafkaConfiguration.java | 14 - .../stream/MqttKafkaPublishFactory.java | 5 +- .../stream/MqttKafkaSessionFactory.java | 155 +- .../stream/MqttKafkaSubscribeFactory.java | 22 +- .../internal/MqttKafkaConfigurationTest.java | 6 - .../stream/MqttKafkaSessionProxyIT.java | 5 - .../command/log/internal/LoggableStream.java | 35 +- .../stream/KafkaCacheClientFactory.java | 7 + .../stream/KafkaCacheConsumerFactory.java | 1022 
++++++++++ .../stream/KafkaCacheGroupFactory.java | 59 +- .../stream/KafkaCacheOffsetFetchFactory.java | 1046 +++++++++++ .../stream/KafkaCacheServerFactory.java | 7 + .../stream/KafkaClientConsumerFactory.java | 1322 +++++++++++++ .../internal/stream/KafkaClientFactory.java | 8 + .../stream/KafkaClientGroupFactory.java | 1242 +++++++++++-- .../stream/KafkaClientOffsetFetchFactory.java | 1644 +++++++++++++++++ .../internal/stream/KafkaMergedFactory.java | 440 ++++- .../stream/KafkaOffsetFetchTopic.java | 32 + .../binding-kafka/src/main/zilla/protocol.idl | 42 +- .../internal/stream/CacheConsumerIT.java | 64 + .../kafka/internal/stream/CacheMergedIT.java | 10 + .../internal/stream/CacheOffsetFetchIT.java | 68 + .../internal/stream/ClientConsumerIT.java | 64 + .../kafka/internal/stream/ClientGroupIT.java | 3 +- .../internal/stream/ClientOffsetFetchIT.java | 62 + .../kafka/produce/bidi.stream.rpc/client.rpt | 1 + .../kafka/produce/bidi.stream.rpc/server.rpt | 1 + .../produce/client.stream.rpc/client.rpt | 1 + .../produce/client.stream.rpc/server.rpt | 1 + .../produce/server.stream.rpc/client.rpt | 1 + .../produce/server.stream.rpc/server.rpt | 1 + .../kafka/produce/unary.rpc/client.rpt | 1 + .../kafka/produce/unary.rpc/server.rpt | 1 + .../kafka/get.item.modified/client.rpt | 1 + .../kafka/get.item.modified/server.rpt | 1 + .../get.item.no.etag.modified/client.rpt | 1 + .../get.item.no.etag.modified/server.rpt | 1 + .../kafka/get.items.modified/client.rpt | 1 + .../kafka/get.items.modified/server.rpt | 1 + .../kafka/get.items.write.flush/client.rpt | 1 + .../kafka/get.items.write.flush/server.rpt | 1 + .../kafka/streams/kafka/get.items/client.rpt | 1 + .../kafka/streams/kafka/get.items/server.rpt | 1 + .../kafka/internal/KafkaFunctions.java | 981 ++++++---- .../main/resources/META-INF/zilla/kafka.idl | 94 +- .../consumer/partition.assignment/client.rpt | 40 + .../consumer/partition.assignment/server.rpt | 48 + .../client.rpt | 9 +- .../server.rpt | 8 +- 
.../application/group/leader/client.rpt | 9 +- .../application/group/leader/server.rpt | 8 +- .../group/partition.assignment/client.rpt | 64 + .../group/partition.assignment/server.rpt | 69 + .../client.rpt | 20 +- .../server.rpt | 17 +- .../rebalance.protocol.highlander/client.rpt | 21 +- .../rebalance.protocol.highlander/server.rpt | 18 +- .../rebalance.protocol.unknown/client.rpt | 10 +- .../rebalance.protocol.unknown/server.rpt | 8 +- .../group/rebalance.sync.group/client.rpt | 65 + .../group/rebalance.sync.group/server.rpt | 69 + .../merged.fetch.filter.change/client.rpt | 2 + .../merged.fetch.filter.change/server.rpt | 2 + .../client.rpt | 1 + .../server.rpt | 3 +- .../client.rpt | 1 + .../server.rpt | 1 + .../merged.fetch.filter.none/client.rpt | 1 + .../merged.fetch.filter.none/server.rpt | 1 + .../merged.fetch.filter.sync/client.rpt | 1 + .../merged.fetch.filter.sync/server.rpt | 1 + .../client.rpt | 1 + .../server.rpt | 1 + .../merged.fetch.message.values/client.rpt | 1 + .../merged.fetch.message.values/server.rpt | 1 + .../client.rpt | 45 + .../server.rpt | 51 + .../client.rpt | 4 + .../server.rpt | 16 +- .../merged.produce.flush.dynamic/client.rpt | 4 + .../merged.produce.flush.dynamic/server.rpt | 4 + .../merged/merged.produce.flush/client.rpt | 6 +- .../merged/merged.produce.flush/server.rpt | 4 + .../client.rpt | 170 ++ .../server.rpt | 168 ++ .../offset.commit/commit.offset/client.rpt | 37 + .../offset.commit/commit.offset/server.rpt | 43 + .../offset.fetch/partition.offset/client.rpt | 36 + .../offset.fetch/partition.offset/server.rpt | 41 + .../coordinator.not.available/client.rpt | 46 +- .../coordinator.not.available/server.rpt | 36 + .../client.rpt | 93 +- .../server.rpt | 72 + .../client.rpt | 52 +- .../server.rpt | 42 +- .../client.rpt | 105 +- .../server.rpt | 86 +- .../client.rpt | 54 +- .../server.rpt | 42 +- .../rebalance.protocol.highlander/client.rpt | 52 +- .../rebalance.protocol.highlander/server.rpt | 42 +- 
.../rebalance.protocol.unknown/client.rpt | 52 +- .../rebalance.protocol.unknown/server.rpt | 42 +- .../rebalance.sync.group/client.rpt | 52 +- .../rebalance.sync.group/server.rpt | 42 +- .../leader/client.rpt | 81 +- .../leader/server.rpt | 72 +- .../topic.offset.info/client.rpt | 49 + .../topic.offset.info/server.rpt | 46 + .../kafka/internal/KafkaFunctionsTest.java | 409 ++-- .../kafka/streams/application/ConsumerIT.java | 47 + .../kafka/streams/application/GroupIT.java | 19 +- .../kafka/streams/application/MergedIT.java | 18 + .../server.sent.messages.with.etag/client.rpt | 1 + .../server.sent.messages.with.etag/server.rpt | 1 + .../client.rpt | 1 + .../server.rpt | 1 + .../kafka/server.sent.messages/client.rpt | 1 + .../kafka/server.sent.messages/server.rpt | 1 + 177 files changed, 10924 insertions(+), 1331 deletions(-) create mode 100644 runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheConsumerFactory.java create mode 100644 runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetFetchFactory.java create mode 100644 runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConsumerFactory.java create mode 100644 runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java create mode 100644 runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaOffsetFetchTopic.java create mode 100644 runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java create mode 100644 runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetFetchIT.java create mode 100644 runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientConsumerIT.java create mode 100644 
runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientOffsetFetchIT.java create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/client.rpt create mode 100644 
specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/server.rpt create mode 100644 specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ConsumerIT.java diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt index 917160940c..7df85f13d7 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt @@ -151,17 +151,19 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} -read zilla:data.null read notify RECEIVED_GROUP_MEMBERS_LEADER +write zilla:data.empty +write flush + write 
abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt index 13916ca7e9..074efefd62 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt @@ -153,16 +153,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected # send group members (leader) -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} write flush +read zilla:data.empty + read aborted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt index aa4d4e79e6..8256a78bba 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt @@ -66,20 +66,23 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") 
.memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} +write notify RECEIVED_LEADER1 + +write zilla:data.empty +write flush write abort -write notify GROUP1_ABORTED -connect await GROUP1_ABORTED +connect await RECEIVED_LEADER1 "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -276,18 +279,21 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} +read notify RECEIVED_LEADER2 + +write zilla:data.empty +write flush -write notify GROUP2_FINISHED -connect await GROUP2_FINISHED +connect await RECEIVED_LEADER2 "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt index 35ab33205b..9d236ff7ad 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt @@ -62,17 +62,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} - write flush +read zilla:data.empty + read aborted write abort @@ -259,17 +260,18 @@ read zilla:begin.ext 
${kafka:matchBeginEx() connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} - write flush +read zilla:data.empty + accepted read zilla:begin.ext ${kafka:matchBeginEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt index 4282c55601..a9bb4442fb 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt @@ -66,20 +66,22 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} -read notify RECEIVED_LEADER_DATA +read notify RECEIVED_LEADER + +write zilla:data.empty read abort -connect await RECEIVED_LEADER_DATA +connect await RECEIVED_LEADER "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt index 7107c3c822..b743a3bcfb 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt @@ -63,16 +63,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} write flush +read zilla:data.empty + write aborted accepted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt index 0112b51e6a..4c588f81a2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt @@ -66,33 +66,35 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} +write zilla:data.empty write advise zilla:flush write notify HEARTBEAT1_SENT -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(2) + .members("consumer-1") + .members("consumer-2") .build() .build()} +read notify RECEIVED_LEAVE_GROUP_DATA write close -write notify 
GROUP1_CLOSED -connect await GROUP1_CLOSED +connect await RECEIVED_LEAVE_GROUP_DATA "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -332,29 +334,34 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-2") - .members(2) + .members("consumer-1") + .members("consumer-2") .build() .build()} +write zilla:data.empty + write advise zilla:flush -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-2") .memberId("consumer-2") - .members(1) + .members("consumer-2") .build() .build()} +read notify RECEIVED_LEADER2 + +write zilla:data.empty -write notify GROUP2_FINISHED -connect await GROUP2_FINISHED +connect await RECEIVED_LEADER2 "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt index 6398bd4f6d..386a69ea77 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt @@ -62,27 +62,30 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} write flush +read zilla:data.empty + read 
advised zilla:flush # On the session stream the heartbeat arrives (on the mqtt_sessions merged stream) read await HEARTBEAT1_SENT -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(2) + .members("consumer-1") + .members("consumer-2") .build() .build()} write flush @@ -322,33 +325,38 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-2") - .members(2) + .members("consumer-1") + .members("consumer-2") .build() .build()} write flush -# On the session publish stream, send a heartbeat +read zilla:data.empty +# On the session publish stream, send a heartbeat read advised zilla:flush + # Wait until I receive a data frame, that confirms that I'm the leader # Once it's confirmed, I can send the CONNACK -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-2") .memberId("consumer-2") - .members(1) + .members("consumer-2") .build() .build()} write flush +read zilla:data.empty + accepted read zilla:begin.ext ${kafka:matchBeginEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt index b4c4714a4c..7f37709283 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt @@ 
-151,17 +151,20 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} -read zilla:data.null read notify RECEIVED_GROUP_MEMBERS_LEADER +write zilla:data.empty +write flush + + write close diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt index 41203a3ab6..ccdd5717f4 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt @@ -153,16 +153,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected # send group members (leader) -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} write flush +read zilla:data.empty + read closed diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt index 4207bee339..fba6e61b02 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt @@ -64,17 +64,28 @@ write zilla:begin.ext ${kafka:beginEx() .build() .build()} +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(30000) + .build() + .build()} + connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} +write zilla:data.empty + write advise zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt index 3c44ddd9c4..ebbfc98724 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt @@ -60,18 +60,29 @@ read zilla:begin.ext ${kafka:matchBeginEx() .build() .build()} +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(30000) + .build() + .build()} + connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() 
.leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} write flush +read zilla:data.empty + accepted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt index db1551265a..ea5829e300 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt @@ -64,17 +64,28 @@ write zilla:begin.ext ${kafka:beginEx() .build() .build()} +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(2000) + .build() + .build()} + connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} +write zilla:data.empty + write advise zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt index ce6507d359..ca476f7775 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt @@ -60,18 +60,29 @@ read zilla:begin.ext ${kafka:matchBeginEx() .build() .build()} +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(2000) + .build() + .build()} + connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} write flush +read zilla:data.empty + accepted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt index a1516d4ee6..1f4ad590ea 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt @@ -66,31 +66,34 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} +write zilla:data.empty + write advise zilla:flush -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(2) + .members("consumer-1") + .members("consumer-2") .build() .build()} +read notify 
RECEIVED_LEAVE_GROUP_SIGNAL write close -write notify GROUP1_CLOSED -connect await GROUP1_CLOSED +connect await RECEIVED_LEAVE_GROUP_SIGNAL "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" @@ -327,29 +330,34 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-2") - .members(2) + .members("consumer-1") + .members("consumer-2") .build() .build()} +write zilla:data.empty + write advise zilla:flush -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-2") .memberId("consumer-2") - .members(1) + .members("consumer-2") .build() .build()} +read notify RECEIVED_LEADER2 + +write zilla:data.empty -write notify GROUP2_FINISHED -connect await GROUP2_FINISHED +connect await RECEIVED_LEADER2 "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt index 5253c8181c..be00f4d0c5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt @@ -62,25 +62,28 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + 
.members("consumer-1") .build() .build()} write flush +read zilla:data.empty + # On the session stream the heartbeat arrives (on the mqtt_sessions merged stream) read advised zilla:flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(2) + .members("consumer-1") + .members("consumer-2") .build() .build()} write flush @@ -321,17 +324,20 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-2") - .members(2) + .members("consumer-1") + .members("consumer-2") .build() .build()} write flush write notify SESSION2_NOT_LEADER +read zilla:data.empty + # On the session publish stream, send a heartbeat read advised zilla:flush @@ -340,16 +346,17 @@ read advised zilla:flush # Wait until I receive a data frame, that confirms that I'm the leader # Once it's confirmed, I can send the CONNACK -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-2") .memberId("consumer-2") - .members(1) + .members("consumer-2") .build() .build()} write flush +read zilla:data.empty accepted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt index 70ddc96dcb..c80dd8c3b7 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt @@ -66,15 +66,18 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} read notify RECEIVED_LEADER_DATA + +write zilla:data.empty + read notify CONNACK_TRIGGERED connect await RECEIVED_LEADER_DATA diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt index d551d404f6..aeba744dc5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt @@ -63,16 +63,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} write flush +read zilla:data.empty + read await CONNACK_TRIGGERED read abort diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt index ef73e84569..babbaf3778 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt @@ -67,16 +67,19 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} read notify RECEIVED_LEADER_DATA +write zilla:data.empty + + connect await RECEIVED_LEADER_DATA "zilla://streams/kafka0" option zilla:window 8192 diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt index e5c4bb2b04..424d1853d0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt @@ -65,16 +65,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} write flush +read zilla:data.empty + accepted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt index 
f85f4f3de5..9b47555d45 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt @@ -65,17 +65,20 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} -read notify RECEIVED_LEADER_DATA +read notify RECEIVED_LEADER -connect await RECEIVED_LEADER_DATA +write zilla:data.empty + + +connect await RECEIVED_LEADER "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt index e1660a080a..9ed5aad6ce 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt @@ -63,16 +63,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} write flush +read zilla:data.empty + accepted diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt index 1e01bc5a4a..489f71b6e7 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt @@ -63,19 +63,20 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} +read notify RECEIVED_LEADER +write zilla:data.empty -write notify GROUP1_FINISHED -connect await GROUP1_FINISHED +connect await RECEIVED_LEADER "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt index a6dcf27f75..0339f8324e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt @@ -60,16 +60,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write zilla:data.ext 
${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} write flush +read zilla:data.empty + accepted read zilla:begin.ext ${kafka:matchBeginEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt index aec532f06b..45123d301e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt @@ -66,17 +66,20 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} -read notify RECEIVED_LEADER_DATA +read notify RECEIVED_LEADER + +write zilla:data.empty + -connect await RECEIVED_LEADER_DATA +connect await RECEIVED_LEADER "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt index 2d4d9e6aac..96edeb8d81 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt @@ -64,16 +64,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected # This is the second prerequisite -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} write flush +read zilla:data.empty + accepted diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt index 5054bd4156..bffdd9bd34 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt @@ -63,18 +63,20 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} +read notify RECEIVED_LEADER + +write zilla:data.empty -write notify GROUP1_FINISHED -connect await GROUP1_FINISHED +connect await RECEIVED_LEADER "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt index ec14a6fac1..88215301ce 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt @@ -60,16 +60,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} write flush +read zilla:data.empty + accepted read zilla:begin.ext ${kafka:matchBeginEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt index 0b066697ae..9b891d6105 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt @@ -63,18 +63,20 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} +read notify RECEIVED_LEADER + +write zilla:data.empty -write notify GROUP1_FINISHED -connect await 
GROUP1_FINISHED +connect await RECEIVED_LEADER "zilla://streams/kafka0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt index 576e6e0d23..289fcda181 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt @@ -60,16 +60,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("consumer-1") .memberId("consumer-1") - .members(1) + .members("consumer-1") .build() .build()} write flush +read zilla:data.empty + accepted read zilla:begin.ext ${kafka:matchBeginEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt index 7fd4b3fb14..c3072a3fff 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt @@ -191,17 +191,18 @@ write zilla:begin.ext 
${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} -read zilla:data.null read notify RECEIVED_GROUP_MEMBERS_LEADER +write zilla:data.empty + write abort @@ -318,15 +319,16 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("PRODUCE_AND_FETCH") - .filter() - .key("client-1") - .build() - .filter() - .key("client-1#migrate") - .headerNot("sender-id", "sender-1") + .fetch() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() .build() - .build() .build()} read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt index 08da729d9f..19479d32b0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt @@ -194,16 +194,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected # send group members (leader) -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} write flush +read zilla:data.empty + read aborted @@ -321,15 +323,16 @@ write notify 
RECEIVED_WILL_DELIVER_LATER_SIGNAL read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("PRODUCE_AND_FETCH") - .filter() - .key("client-1") + .fetch() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() .build() - .filter() - .key("client-1#migrate") - .headerNot("sender-id", "sender-1") - .build() - .build() .build()} # no session state diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt index 3ef943399b..de56d9bb7a 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt @@ -191,17 +191,18 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} -read zilla:data.null read notify RECEIVED_GROUP_MEMBERS_LEADER +write zilla:data.empty + write abort @@ -320,15 +321,16 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("PRODUCE_AND_FETCH") - .filter() - .key("client-1") - .build() - .filter() - .key("client-1#migrate") - .headerNot("sender-id", "sender-1") + .fetch() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + 
.key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() .build() - .build() .build()} # no will signals diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt index 8f7ed45fc1..ac8597bdf6 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt @@ -194,15 +194,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected # send group members (leader) -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} -write flush + +read zilla:data.empty read aborted @@ -324,15 +325,16 @@ write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("PRODUCE_AND_FETCH") - .filter() - .key("client-1") - .build() - .filter() - .key("client-1#migrate") - .headerNot("sender-id", "sender-1") + .fetch() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() .build() - .build() .build()} # no session state diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt index 6f8757c5e5..654ca5d7a0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt @@ -109,17 +109,18 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} -read zilla:data.null read notify RECEIVED_GROUP_MEMBERS_LEADER +write zilla:data.empty + write abort @@ -222,15 +223,16 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("PRODUCE_AND_FETCH") - .filter() - .key("client-1") + .fetch() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() .build() - .filter() - .key("client-1#migrate") - .headerNot("sender-id", "sender-1") - .build() - .build() .build()} read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt index 840033a4a6..703704c5d6 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt @@ -110,16 +110,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected # send group members (leader) -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} write flush +read zilla:data.empty + accepted @@ -222,15 +224,16 @@ write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("PRODUCE_AND_FETCH") - .filter() - .key("client-1") + .fetch() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() .build() - .filter() - .key("client-1#migrate") - .headerNot("sender-id", "sender-1") - .build() - .build() .build()} # no session state diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt index e315decae9..a8480b4e22 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt @@ -178,17 +178,18 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") 
.memberId("member-1") - .members(1) + .members("member-1") .build() .build()} -read zilla:data.null read notify RECEIVED_GROUP_MEMBERS_LEADER +write zilla:data.empty + write close @@ -304,15 +305,16 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("PRODUCE_AND_FETCH") - .filter() - .key("client-1") - .build() - .filter() - .key("client-1#migrate") - .headerNot("sender-id", "sender-1") + .fetch() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() .build() - .build() .build()} read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt index c20fe09fdc..f0c59441de 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt @@ -178,16 +178,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected # send group members (leader) -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} write flush +read zilla:data.empty + read closed @@ -303,15 +305,16 @@ read notify RECEIVED_WILL_DELIVER_LATER_SIGNAL read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("PRODUCE_AND_FETCH") - .filter() - .key("client-1") + .fetch() + 
.capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() .build() - .filter() - .key("client-1#migrate") - .headerNot("sender-id", "sender-1") - .build() - .build() .build()} # no session state diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt index 5af55920f7..3f197ab1f2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt @@ -191,23 +191,24 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} -read zilla:data.null read notify RECEIVED_GROUP_MEMBERS_LEADER +write zilla:data.empty -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(2) + .members("member-1") + .members("member-2") .build() .build()} @@ -325,15 +326,16 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("PRODUCE_AND_FETCH") - .filter() - .key("client-1") - .build() - .filter() - .key("client-1#migrate") - .headerNot("sender-id", "sender-1") + .fetch() + .capabilities("PRODUCE_AND_FETCH") + .filter() + 
.key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() .build() - .build() .build()} read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt index 4b30550610..c34daded3c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt @@ -194,23 +194,27 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected # send group members (leader) -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} write flush +read zilla:data.empty + write await RECEIVED_WILL_DELIVER_LATER_SIGNAL -write zilla:data.ext ${kafka:dataEx() + +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(2) + .members("member-1") + .members("member-2") .build() .build()} write flush @@ -331,15 +335,16 @@ write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("PRODUCE_AND_FETCH") - .filter() - .key("client-1") - .build() - .filter() - .key("client-1#migrate") - .headerNot("sender-id", "sender-1") + .fetch() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + 
.headerNot("sender-id", "sender-1") + .build() .build() - .build() .build()} # no session state diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt index 715dba25b0..ca62e7f49c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt @@ -168,17 +168,18 @@ write zilla:begin.ext ${kafka:beginEx() connected -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} -read zilla:data.null read notify RECEIVED_GROUP_MEMBERS_LEADER +write zilla:data.empty + write abort @@ -296,15 +297,16 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("PRODUCE_AND_FETCH") - .filter() - .key("client-1") - .build() - .filter() - .key("client-1#migrate") - .headerNot("sender-id", "sender-1") + .fetch() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() .build() - .build() .build()} read advised zilla:flush diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt index 20f972bcb5..53548bf530 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt @@ -169,16 +169,18 @@ read zilla:begin.ext ${kafka:matchBeginEx() connected # send group members (leader) -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("member-1") .memberId("member-1") - .members(1) + .members("member-1") .build() .build()} write flush +read zilla:data.empty + accepted @@ -296,15 +298,16 @@ write notify RECEIVED_WILL_DELIVER_LATER_SIGNAL read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("PRODUCE_AND_FETCH") - .filter() - .key("client-1") + .fetch() + .capabilities("PRODUCE_AND_FETCH") + .filter() + .key("client-1") + .build() + .filter() + .key("client-1#migrate") + .headerNot("sender-id", "sender-1") + .build() .build() - .filter() - .key("client-1#migrate") - .headerNot("sender-id", "sender-1") - .build() - .build() .build()} # no session state diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt index 23201284d5..4b6b546d33 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt @@ -73,45 +73,47 @@ read "message2" write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("two") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("two") + .build() .build() .build() - .build() .build()} write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("two") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("two") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("three") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("three") + .build() .build() .build() - .build() .build()} write notify RETAIN_FINISHED @@ -177,20 +179,21 @@ read "message" write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("two") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + 
.sequence("sensor") + .sequence("two") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("three") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("three") + .build() .build() .build() - .build() .build()} write notify SECOND_FLUSH_SENT diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt index 817d4cfab2..dbd72d3264 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt @@ -77,46 +77,48 @@ write flush read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("two") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("two") + .build() .build() .build() - .build() .build()} read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - 
.sequence("sensor") - .sequence("two") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("two") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("three") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("three") + .build() .build() .build() - .build() .build()} read await RETAIN_FINISHED @@ -183,20 +185,21 @@ write notify FIRST_RETAINED_SENT read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("two") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("two") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("three") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("three") + .build() .build() .build() - .build() .build()} read await SECOND_FLUSH_SENT diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt index b955b6f287..68c830f04f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt @@ -73,20 +73,21 @@ read "message2" write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") + .fetch() + .capabilities("FETCH_ONLY") + 
.filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("two") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("two") + .build() .build() .build() - .build() .build()} write notify RETAIN_STARTED read zilla:data.ext ${kafka:matchDataEx() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt index 5640fc9783..7a8de77b33 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt @@ -77,20 +77,21 @@ write flush read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("two") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("two") + .build() .build() .build() - .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt index 526b42394f..0cc0083143 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt @@ -108,14 +108,15 @@ read "message" write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() .build() .build() - .build() .build()} read zilla:data.ext ${kafka:matchDataEx() @@ -138,20 +139,21 @@ read "message2" write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("two") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("two") + .build() .build() .build() - .build() .build()} write notify RETAIN_FINISHED diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt index f1faf09a87..b121a88e65 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt @@ -107,14 +107,15 @@ write flush read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() .build() .build() - .build() .build()} @@ -140,20 +141,21 @@ write flush read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("two") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("two") + .build() .build() .build() - .build() .build()} read await RETAIN_FINISHED diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt index 8e124ff4a4..4ff82b94df 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt @@ -73,20 +73,21 @@ read "message2" write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("two") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("two") + .build() .build() .build() - .build() .build()} write notify MESSAGES_FINISHED diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt index 09a1c02153..77a953a81c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt @@ -77,20 +77,21 @@ write flush read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() - .capabilities("FETCH_ONLY") - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("one") + .fetch() + .capabilities("FETCH_ONLY") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("one") + .build() .build() - .build() - .filter() - .headers("zilla:filter") - .sequence("sensor") - .sequence("two") + .filter() + .headers("zilla:filter") + .sequence("sensor") + .sequence("two") + .build() .build() .build() - 
.build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt index d5318f36d2..299696c150 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt @@ -15,7 +15,7 @@ connect "zilla://streams/kafka0" option zilla:window 8192 - option zilla:transmission "duplex" + option zilla:transmission "duplex" write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) @@ -38,6 +38,7 @@ connected write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:filter") @@ -73,4 +74,4 @@ read zilla:data.ext ${kafka:matchDataEx() .build() .build()} -read "message" \ No newline at end of file +read "message" diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt index cfbf2e195c..8ad99f9fc5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt @@ -40,6 +40,7 @@ connected read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:filter") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt index 98be76b333..356c44a7b0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt @@ -15,7 +15,7 @@ connect "zilla://streams/kafka0" option zilla:window 8192 - option zilla:transmission "duplex" + option zilla:transmission "duplex" write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) @@ -38,6 +38,7 @@ connected write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:filter") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt index c102d10f74..55647a946e 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt @@ -39,6 +39,7 @@ connected read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:filter") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt index 848af3483f..750d5e5eee 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt @@ -15,7 +15,7 @@ connect "zilla://streams/kafka0" option zilla:window 8192 - option zilla:transmission "duplex" + option zilla:transmission "duplex" write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) @@ -37,6 +37,7 @@ connected write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:filter") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt index 0222a9b864..bcb29c8bcb 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt @@ -39,6 +39,7 @@ connected read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:filter") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt index b7a444fd76..c79b4bde45 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt @@ -43,6 +43,7 @@ connected write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:filter") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt index 9482350d03..a5e90e95ea 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt @@ -45,6 +45,7 @@ connected read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("zilla:filter") diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java index 0e33fc3df3..f04b2a3b27 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java @@ -42,8 +42,6 @@ public class MqttKafkaConfiguration extends Configuration public static final PropertyDef TIME; public static final BooleanPropertyDef WILL_AVAILABLE; public static final IntPropertyDef WILL_STREAM_RECONNECT_DELAY; - public static final IntPropertyDef SESSION_EXPIRY_INTERVAL_MAX; - public static final IntPropertyDef SESSION_EXPIRY_INTERVAL_MIN; static { @@ -63,8 +61,6 @@ public class MqttKafkaConfiguration extends Configuration MqttKafkaConfiguration::decodeLongSupplier, MqttKafkaConfiguration::defaultTime); WILL_AVAILABLE = config.property("will.available", true); WILL_STREAM_RECONNECT_DELAY = config.property("will.stream.reconnect", 2); - SESSION_EXPIRY_INTERVAL_MAX = config.property("session.expiry.interval.max", 30000); - SESSION_EXPIRY_INTERVAL_MIN = config.property("session.expiry.interval.min", 1000); MQTT_KAFKA_CONFIG = config; } @@ -114,16 +110,6 @@ public int willStreamReconnectDelay() return 
WILL_STREAM_RECONNECT_DELAY.getAsInt(this); } - public int sessionExpiryIntervalMax() - { - return SESSION_EXPIRY_INTERVAL_MAX.get(this); - } - - public int sessionExpiryIntervalMin() - { - return SESSION_EXPIRY_INTERVAL_MIN.get(this); - } - private static StringSupplier decodeStringSupplier( String fullyQualifiedMethodName) { diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java index c8a34e2025..258fa73218 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java @@ -404,9 +404,10 @@ private void onMqttData( final KafkaFlushExFW kafkaFlushEx = kafkaFlushExRW.wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) - .merged(m -> m.partition(p -> p.partitionId(-1).partitionOffset(-1)) + .merged(m -> m.fetch(f -> f + .partition(p -> p.partitionId(-1).partitionOffset(-1)) .capabilities(c -> c.set(KafkaCapabilities.PRODUCE_ONLY)) - .key(key)) + .key(key))) .build(); retained.doKafkaFlush(traceId, authorization, budgetId, reserved, kafkaFlushEx); } diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java index 6de885d22c..2d172bec98 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java +++ 
b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java @@ -76,7 +76,8 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaFlushExFW; -import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaGroupDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaGroupBeginExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaGroupFlushExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaMergedDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaMergedFlushExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.KafkaResetExFW; @@ -123,6 +124,7 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private static final int SIGNAL_CONNECT_WILL_STREAM = 2; private static final int SIGNAL_EXPIRE_SESSION = 3; private static final int SIZE_OF_UUID = 38; + private static final AtomicInteger CONTEXT_COUNTER = new AtomicInteger(0); private final BeginFW beginRO = new BeginFW(); private final DataFW dataRO = new DataFW(); @@ -154,6 +156,7 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final MqttWillMessageFW mqttWillRO = new MqttWillMessageFW(); private final MqttDataExFW mqttDataExRO = new MqttDataExFW(); private final MqttResetExFW.Builder mqttResetExRW = new MqttResetExFW.Builder(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); private final KafkaDataExFW kafkaDataExRO = new KafkaDataExFW(); private final KafkaResetExFW kafkaResetExRO = new KafkaResetExFW(); private final KafkaFlushExFW kafkaFlushExRO = new KafkaFlushExFW(); @@ -193,9 +196,6 @@ 
public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final InstanceId instanceId; private final boolean willAvailable; private final int reconnectDelay; - private final int sessionExpiryIntervalMaxMillis; - private final int sessionExpiryIntervalMinMillis; - private static AtomicInteger contextCounter = new AtomicInteger(0); private int reconnectAttempt; @@ -235,8 +235,6 @@ public MqttKafkaSessionFactory( this.sessionExpiryIds = new Object2LongHashMap<>(-1); this.instanceId = instanceId; this.reconnectDelay = reconnectDelay.getAsInt(config); - this.sessionExpiryIntervalMaxMillis = config.sessionExpiryIntervalMax(); - this.sessionExpiryIntervalMinMillis = config.sessionExpiryIntervalMin(); } @Override @@ -588,24 +586,24 @@ private void doFlushProduceAndFetchWithFilter( final KafkaFlushExFW kafkaFlushEx = kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) .typeId(kafkaTypeId) - .merged(m -> + .merged(m -> m.fetch(f -> { - m.capabilities(c -> c.set(KafkaCapabilities.PRODUCE_AND_FETCH)); - m.filtersItem(f -> f.conditionsItem(ci -> + f.capabilities(c -> c.set(KafkaCapabilities.PRODUCE_AND_FETCH)); + f.filtersItem(fi -> fi.conditionsItem(ci -> ci.key(kb -> kb.length(clientId.length()) .value(clientId.value(), 0, clientId.length())))); - m.filtersItem(f -> + f.filtersItem(fi -> { - f.conditionsItem(ci -> + fi.conditionsItem(ci -> ci.key(kb -> kb.length(clientIdMigrate.length()) .value(clientIdMigrate.value(), 0, clientIdMigrate.length()))); - f.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> + fi.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> h.nameLen(SENDER_ID_NAME.length()) .name(SENDER_ID_NAME.value(), 0, SENDER_ID_NAME.length()) .valueLen(sessionId.length()) .value(sessionId.value(), 0, sessionId.length()))))); }); - }) + })) .build(); session.doKafkaFlush(traceId, authorization, budgetId, 0, kafkaFlushEx); @@ -1185,7 +1183,7 @@ else if (type.equals(EXPIRY_SIGNAL_NAME_OCTETS) 
&& sessionExpiryIds.containsKey( expireAt = supplyTime.getAsLong() + expirySignal.delay(); } - final int contextId = contextCounter.incrementAndGet(); + final int contextId = CONTEXT_COUNTER.incrementAndGet(); expiryClientIds.put(contextId, expiryClientId); final long signalId = @@ -1216,7 +1214,7 @@ private void onKafkaFlush( flushEx != null && flushEx.typeId() == kafkaTypeId ? extension.get(kafkaFlushExRO::tryWrap) : null; final KafkaMergedFlushExFW kafkaMergedFlushEx = kafkaFlushEx != null && kafkaFlushEx.kind() == KafkaDataExFW.KIND_MERGED ? kafkaFlushEx.merged() : null; - final Array32FW progress = kafkaMergedFlushEx != null ? kafkaMergedFlushEx.progress() : null; + final Array32FW progress = kafkaMergedFlushEx != null ? kafkaMergedFlushEx.fetch().progress() : null; if (progress != null) { @@ -2517,7 +2515,9 @@ private void onKafkaAbort( delegate.doMqttAbort(traceId, authorization); } - protected void sendMigrateSignal(long authorization, long traceId) + protected void sendMigrateSignal( + long traceId, + long authorization) { Flyweight kafkaMigrateDataEx = kafkaDataExRW .wrap(extBuffer, 0, extBuffer.capacity()) @@ -2671,7 +2671,7 @@ protected void onKafkaWindow( delegate.group = new KafkaGroupStream(originId, routedId, delegate); delegate.group.doKafkaBegin(traceId, authorization, 0); - sendMigrateSignal(authorization, traceId); + sendMigrateSignal(traceId, authorization); } } } @@ -3035,6 +3035,10 @@ private void onGroupMessage( final DataFW data = dataRO.wrap(buffer, index, index + length); onKafkaData(data); break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onKafkaFlush(flush); + break; case EndFW.TYPE_ID: final EndFW end = endRO.wrap(buffer, index, index + length); onKafkaEnd(end); @@ -3050,6 +3054,55 @@ private void onGroupMessage( } } + private void onKafkaFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = 
flush.traceId(); + final long authorization = flush.authorization(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + + assert replyAck <= replySeq; + + final OctetsFW extension = flush.extension(); + final ExtensionFW flushEx = extension.get(extensionRO::tryWrap); + final KafkaFlushExFW kafkaFlushEx = + flushEx != null && flushEx.typeId() == kafkaTypeId ? extension.get(kafkaFlushExRO::tryWrap) : null; + final KafkaGroupFlushExFW kafkaGroupDataEx = + kafkaFlushEx != null && kafkaFlushEx.kind() == KafkaFlushExFW.KIND_GROUP ? kafkaFlushEx.group() : null; + final String16FW leaderId = kafkaGroupDataEx != null ? kafkaGroupDataEx.leaderId() : null; + final String16FW memberId = kafkaGroupDataEx != null ? kafkaGroupDataEx.memberId() : null; + final int members = kafkaGroupDataEx != null ? kafkaGroupDataEx.members().fieldCount() : 0; + + if (leaderId.equals(memberId)) + { + if (members > 1) + { + delegate.session.sendMigrateSignal(traceId, authorization); + delegate.session.sendWillSignal(traceId, authorization); + delegate.session.doKafkaEnd(traceId, authorization); + doKafkaEnd(traceId, authorization); + } + else + { + delegate.session.doKafkaEnd(traceId, authorization); + final long routedId = delegate.session.routedId; + delegate.session = new KafkaSessionStateProxy(originId, routedId, delegate); + delegate.session.doKafkaBeginIfNecessary(traceId, authorization, 0); + } + } + + if (!MqttKafkaState.initialClosed(state)) + { + doKafkaData(traceId, authorization, 0, 0, DATA_FLAG_COMPLETE, EMPTY_OCTETS, EMPTY_OCTETS); + } + } + private void onKafkaBegin( BeginFW begin) { @@ -3071,20 +3124,30 @@ private void onKafkaBegin( assert replyAck <= replySeq; - Flyweight mqttBeginEx = EMPTY_OCTETS; + final OctetsFW extension = begin.extension(); - final int sessionExpiryMillisInRange = - Math.max(sessionExpiryIntervalMinMillis, Math.min(sessionExpiryIntervalMaxMillis, delegate.sessionExpiryMillis)); + int sessionExpiryMillisInRange = 
delegate.sessionExpiryMillis; + if (extension.sizeof() > 0) + { + final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap); + + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_GROUP; + final KafkaGroupBeginExFW kafkaGroupBeginEx = kafkaBeginEx.group(); + + sessionExpiryMillisInRange = kafkaGroupBeginEx.timeout(); + } + + Flyweight mqttBeginEx = EMPTY_OCTETS; if (delegate.sessionExpiryMillis != sessionExpiryMillisInRange) { + delegate.sessionExpiryMillis = sessionExpiryMillisInRange; mqttBeginEx = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) .typeId(mqttTypeId) .session(sessionBuilder -> sessionBuilder .flags(delegate.sessionFlags) - .expiry((int) TimeUnit.MILLISECONDS.toSeconds(sessionExpiryMillisInRange)) + .expiry((int) TimeUnit.MILLISECONDS.toSeconds(delegate.sessionExpiryMillis)) .clientId(delegate.clientId)) .build(); - delegate.sessionExpiryMillis = sessionExpiryMillisInRange; } delegate.doMqttBegin(traceId, authorization, affinity, mqttBeginEx); @@ -3111,36 +3174,6 @@ private void onKafkaData( doKafkaReset(traceId); delegate.doMqttAbort(traceId, authorization); } - else - { - final OctetsFW extension = data.extension(); - final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); - final KafkaDataExFW kafkaDataEx = - dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; - final KafkaGroupDataExFW kafkaGroupDataEx = - kafkaDataEx != null && kafkaDataEx.kind() == KafkaDataExFW.KIND_GROUP ? kafkaDataEx.group() : null; - final String16FW leaderId = kafkaGroupDataEx != null ? kafkaGroupDataEx.leaderId() : null; - final String16FW memberId = kafkaGroupDataEx != null ? kafkaGroupDataEx.memberId() : null; - final int members = kafkaGroupDataEx != null ? 
kafkaGroupDataEx.members() : 0; - - if (leaderId.equals(memberId)) - { - if (members > 1) - { - delegate.session.sendMigrateSignal(authorization, traceId); - delegate.session.sendWillSignal(authorization, traceId); - delegate.session.doKafkaEnd(traceId, authorization); - doKafkaEnd(traceId, authorization); - } - else - { - delegate.session.doKafkaEnd(traceId, authorization); - final long routedId = delegate.session.routedId; - delegate.session = new KafkaSessionStateProxy(originId, routedId, delegate); - delegate.session.doKafkaBeginIfNecessary(traceId, authorization, 0); - } - } - } } private void onKafkaEnd( @@ -3218,6 +3251,24 @@ private void doKafkaWindow( doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization, budgetId, padding, replyPad, capabilities); } + + private void doKafkaData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + + doData(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } } diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java index c52e372820..853b280aa3 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java @@ -368,19 +368,19 @@ private void onMqttFlush( final KafkaFlushExFW kafkaFlushEx = kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) 
.typeId(kafkaTypeId) - .merged(m -> + .merged(m -> m.fetch(f -> { - m.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); + f.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); filters.forEach(filter -> { if ((filter.flags() & SEND_RETAIN_FLAG) != 0) { retainAvailable = true; } - m.filtersItem(f -> + f.filtersItem(fi -> { final int subscriptionId = (int) filter.subscriptionId(); - f.conditionsItem(ci -> + fi.conditionsItem(ci -> { if (!messagesSubscriptionIds.contains(subscriptionId)) { @@ -393,7 +393,7 @@ private void onMqttFlush( if (noLocal) { final DirectBuffer valueBuffer = clientId.value(); - f.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> + fi.conditionsItem(i -> i.not(n -> n.condition(c -> c.header(h -> h.nameLen(helper.kafkaLocalHeaderName.sizeof()) .name(helper.kafkaLocalHeaderName) .valueLen(valueBuffer.capacity()) @@ -401,7 +401,7 @@ private void onMqttFlush( } }); }); - }) + })) .build(); messages.doKafkaFlush(traceId, authorization, budgetId, reserved, kafkaFlushEx); @@ -1270,15 +1270,15 @@ private void doKafkaFlush( final KafkaFlushExFW retainedKafkaFlushEx = kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) .typeId(kafkaTypeId) - .merged(m -> + .merged(m -> m.fetch(f -> { - m.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); + f.capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)); retainedFilters.forEach(filter -> { - m.filtersItem(f -> + f.filtersItem(fi -> { final int subscriptionId = (int) filter.subscriptionId(); - f.conditionsItem(ci -> + fi.conditionsItem(ci -> { if (!mqtt.messagesSubscriptionIds.contains(subscriptionId)) { @@ -1288,7 +1288,7 @@ private void doKafkaFlush( }); }); }); - }) + })) .build(); doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax, diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java 
b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java index 40cbaa208b..cc7a4145c3 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java @@ -19,8 +19,6 @@ import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.LIFETIME_ID; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.MESSAGES_TOPIC; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.RETAINED_MESSAGES_TOPIC; -import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.SESSION_EXPIRY_INTERVAL_MAX; -import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.SESSION_EXPIRY_INTERVAL_MIN; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.SESSION_ID; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.TIME; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.WILL_AVAILABLE; @@ -41,8 +39,6 @@ public class MqttKafkaConfigurationTest public static final String WILL_ID_NAME = "zilla.binding.mqtt.kafka.will.id"; public static final String LIFETIME_ID_NAME = "zilla.binding.mqtt.kafka.lifetime.id"; public static final String INSTANCE_ID_NAME = "zilla.binding.mqtt.kafka.instance.id"; - public static final String SESSION_EXPIRY_INTERVAL_MAX_NAME = "zilla.binding.mqtt.kafka.session.expiry.interval.max"; - public static final String SESSION_EXPIRY_INTERVAL_MIN_NAME = "zilla.binding.mqtt.kafka.session.expiry.interval.min"; @Test public void shouldVerifyConstants() @@ -56,7 +52,5 @@ public void shouldVerifyConstants() 
assertEquals(WILL_ID.name(), WILL_ID_NAME); assertEquals(LIFETIME_ID.name(), LIFETIME_ID_NAME); assertEquals(INSTANCE_ID.name(), INSTANCE_ID_NAME); - assertEquals(SESSION_EXPIRY_INTERVAL_MAX.name(), SESSION_EXPIRY_INTERVAL_MAX_NAME); - assertEquals(SESSION_EXPIRY_INTERVAL_MIN.name(), SESSION_EXPIRY_INTERVAL_MIN_NAME); } } diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java index be7f8a9369..cf7312a8aa 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java @@ -16,8 +16,6 @@ import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.INSTANCE_ID_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.LIFETIME_ID_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.SESSION_EXPIRY_INTERVAL_MAX_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.SESSION_EXPIRY_INTERVAL_MIN_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.SESSION_ID_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.TIME_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfigurationTest.WILL_AVAILABLE_NAME; @@ -75,7 +73,6 @@ public class MqttKafkaSessionProxyIT @Test @Configuration("proxy.yaml") @Configure(name = WILL_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_EXPIRY_INTERVAL_MAX_NAME, value = "30000") @Specification({ 
"${mqtt}/session.connect.override.max.session.expiry/client", "${kafka}/session.connect.override.max.session.expiry/server"}) @@ -87,8 +84,6 @@ public void shouldConnectServerOverridesSessionExpiryTooBig() throws Exception @Test @Configuration("proxy.yaml") @Configure(name = WILL_AVAILABLE_NAME, value = "false") - @Configure(name = SESSION_EXPIRY_INTERVAL_MAX_NAME, value = "30000") - @Configure(name = SESSION_EXPIRY_INTERVAL_MIN_NAME, value = "2000") @Specification({ "${mqtt}/session.connect.override.min.session.expiry/client", "${kafka}/session.connect.override.min.session.expiry/server"}) diff --git a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java index 52a15e91ec..d8621a9f2e 100644 --- a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java +++ b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java @@ -86,7 +86,7 @@ import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaFetchFlushExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaGroupBeginExFW; -import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaGroupDataExFW; +import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaGroupFlushExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaMergedBeginExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaMergedDataExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaMergedFlushExFW; @@ -1075,9 +1075,6 @@ private void onKafkaDataEx( case KafkaDataExFW.KIND_DESCRIBE: onKafkaDescribeDataEx(offset, timestamp, kafkaDataEx.describe()); break; - case KafkaDataExFW.KIND_GROUP: - 
onKafkaGroupDataEx(offset, timestamp, kafkaDataEx.group()); - break; case KafkaDataExFW.KIND_FETCH: onKafkaFetchDataEx(offset, timestamp, kafkaDataEx.fetch()); break; @@ -1105,17 +1102,6 @@ private void onKafkaDescribeDataEx( format("%s: %s", c.name().asString(), c.value().asString()))); } - private void onKafkaGroupDataEx( - int offset, - long timestamp, - KafkaGroupDataExFW group) - { - String16FW leader = group.leaderId(); - String16FW member = group.memberId(); - - out.printf(verboseFormat, index, offset, timestamp, format("[group] %s %s", leader.asString(), member.asString())); - } - private void onKafkaFetchDataEx( int offset, long timestamp, @@ -1195,6 +1181,9 @@ private void onKafkaFlushEx( case KafkaFlushExFW.KIND_MERGED: onKafkaMergedFlushEx(offset, timestamp, kafkaFlushEx.merged()); break; + case KafkaFlushExFW.KIND_GROUP: + onKafkaGroupFlushEx(offset, timestamp, kafkaFlushEx.group()); + break; case KafkaFlushExFW.KIND_FETCH: onKafkaFetchFlushEx(offset, timestamp, kafkaFlushEx.fetch()); break; @@ -1206,8 +1195,8 @@ private void onKafkaMergedFlushEx( long timestamp, KafkaMergedFlushExFW merged) { - final ArrayFW progress = merged.progress(); - final Array32FW filters = merged.filters(); + final ArrayFW progress = merged.fetch().progress(); + final Array32FW filters = merged.fetch().filters(); out.printf(verboseFormat, index, offset, timestamp, "[merged]"); progress.forEach(p -> out.printf(verboseFormat, index, offset, timestamp, @@ -1219,6 +1208,18 @@ private void onKafkaMergedFlushEx( filters.forEach(f -> f.conditions().forEach(c -> out.printf(verboseFormat, index, offset, timestamp, asString(c)))); } + private void onKafkaGroupFlushEx( + int offset, + long timestamp, + KafkaGroupFlushExFW group) + { + String16FW leader = group.leaderId(); + String16FW member = group.memberId(); + + out.printf(verboseFormat, index, offset, timestamp, format("[group] %s %s (%d)", leader.asString(), + member.asString(), group.members().fieldCount())); + } + private void 
onKafkaFetchFlushEx( int offset, long timestamp, diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java index de57588c10..8c547bae7e 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java @@ -66,6 +66,11 @@ public KafkaCacheClientFactory( final KafkaCacheGroupFactory cacheGroupFactory = new KafkaCacheGroupFactory(config, context, bindings::get); + final KafkaCacheConsumerFactory consumerGroupFactory = new KafkaCacheConsumerFactory(config, context, bindings::get); + + final KafkaCacheOffsetFetchFactory cacheOffsetFetchFactory = + new KafkaCacheOffsetFetchFactory(config, context, bindings::get); + final KafkaCacheClientFetchFactory cacheFetchFactory = new KafkaCacheClientFetchFactory( config, context, bindings::get, accountant::supplyDebitor, supplyCache, supplyCacheRoute); @@ -79,6 +84,8 @@ public KafkaCacheClientFactory( factories.put(KafkaBeginExFW.KIND_META, cacheMetaFactory); factories.put(KafkaBeginExFW.KIND_DESCRIBE, cacheDescribeFactory); factories.put(KafkaBeginExFW.KIND_GROUP, cacheGroupFactory); + factories.put(KafkaBeginExFW.KIND_CONSUMER, consumerGroupFactory); + factories.put(KafkaBeginExFW.KIND_OFFSET_FETCH, cacheOffsetFetchFactory); factories.put(KafkaBeginExFW.KIND_FETCH, cacheFetchFactory); factories.put(KafkaBeginExFW.KIND_PRODUCE, cacheProduceFactory); factories.put(KafkaBeginExFW.KIND_MERGED, cacheMergedFactory); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheConsumerFactory.java 
b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheConsumerFactory.java new file mode 100644 index 0000000000..6396dfe1af --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheConsumerFactory.java @@ -0,0 +1,1022 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.LongSupplier; +import java.util.function.LongUnaryOperator; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.collections.IntHashSet; +import org.agrona.collections.Object2ObjectHashMap; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Array32FW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerAssignmentFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerDataExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaTopicPartitionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; +import io.aklivity.zilla.runtime.engine.concurrent.Signaler; + +public final class KafkaCacheConsumerFactory implements BindingHandler +{ + private static final Consumer EMPTY_EXTENSION = ex -> {}; + + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final AbortFW abortRO = new AbortFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new 
WindowFW(); + private final ExtensionFW extensionRO = new ExtensionFW(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + private final KafkaDataExFW kafkaDataExRO = new KafkaDataExFW(); + private final KafkaResetExFW kafkaResetExRO = new KafkaResetExFW(); + + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); + private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); + + private final int kafkaTypeId; + private final MutableDirectBuffer writeBuffer; + private final MutableDirectBuffer extBuffer; + private final BufferPool bufferPool; + private final Signaler signaler; + private final BindingHandler streamFactory; + private final LongUnaryOperator supplyInitialId; + private final LongUnaryOperator supplyReplyId; + private final LongSupplier supplyTraceId; + private final LongFunction supplyNamespace; + private final LongFunction supplyLocalName; + private final LongFunction supplyBinding; + + private final Object2ObjectHashMap clientConsumerFansByGroupId; + + public KafkaCacheConsumerFactory( + KafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding) + { + this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.bufferPool = context.bufferPool(); + this.signaler = context.signaler(); + this.streamFactory = context.streamFactory(); + this.supplyInitialId = context::supplyInitialId; + this.supplyReplyId = 
context::supplyReplyId; + this.supplyTraceId = context::supplyTraceId; + this.supplyNamespace = context::supplyNamespace; + this.supplyLocalName = context::supplyLocalName; + this.supplyBinding = supplyBinding; + this.clientConsumerFansByGroupId = new Object2ObjectHashMap<>(); + } + + @Override + public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer sender) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert (initialId & 0x0000_0000_0000_0001L) != 0L; + + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extension.get(extensionRO::tryWrap); + assert beginEx != null && beginEx.typeId() == kafkaTypeId; + final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::wrap); + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_CONSUMER; + final KafkaConsumerBeginExFW kafkaConsumerBeginEx = kafkaBeginEx.consumer(); + final String groupId = kafkaConsumerBeginEx.groupId().asString(); + final String topic = kafkaConsumerBeginEx.topic().asString(); + final String consumerId = kafkaConsumerBeginEx.consumerId().asString(); + final int timeout = kafkaConsumerBeginEx.timeout(); + final IntHashSet partitions = new IntHashSet(); + kafkaConsumerBeginEx.partitionIds().forEach(p -> partitions.add(p.partitionId())); + + MessageConsumer newStream = null; + + final KafkaBindingConfig binding = supplyBinding.apply(routedId); + final KafkaRouteConfig resolved = binding != null ? 
binding.resolve(authorization, topic, groupId) : null; + + if (resolved != null) + { + final long resolvedId = resolved.id; + + KafkaCacheConsumerFanout fanout = clientConsumerFansByGroupId.get(groupId); + + if (fanout == null) + { + KafkaCacheConsumerFanout newFanout = + new KafkaCacheConsumerFanout(routedId, resolvedId, authorization, groupId, + topic, consumerId, partitions, timeout); + fanout = newFanout; + clientConsumerFansByGroupId.put(groupId, fanout); + } + + newStream = new KafkaCacheConsumerStream( + fanout, + sender, + originId, + routedId, + initialId, + affinity, + authorization + )::onConsumerMessage; + } + + return newStream; + } + + private MessageConsumer newStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + final MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + 
.extension(extension) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doDataNull( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.limit()) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + 
receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doWindow( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + { + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + final class KafkaCacheConsumerFanout + { + private final long originId; + private final long routedId; + private final long authorization; + private final String groupId; + private final String topic; + private final String consumerId; + private final int timeout; + private final List members; + private final IntHashSet partitions; + private final IntHashSet assignedPartitions; + private final Object2ObjectHashMap assignments; + + private long initialId; + private long replyId; + private MessageConsumer receiver; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + + + 
private KafkaCacheConsumerFanout( + long originId, + long routedId, + long authorization, + String groupId, + String topic, + String consumerId, + IntHashSet partitions, + int timeout) + { + this.originId = originId; + this.routedId = routedId; + this.authorization = authorization; + this.groupId = groupId; + this.topic = topic; + this.consumerId = consumerId; + this.partitions = partitions; + this.timeout = timeout; + this.members = new ArrayList<>(); + this.assignedPartitions = new IntHashSet(); + this.assignments = new Object2ObjectHashMap<>(); + } + + private void onConsumerFanoutMemberOpening( + long traceId, + KafkaCacheConsumerStream member) + { + members.add(member); + + assert !members.isEmpty(); + + doConsumerFanoutInitialBeginIfNecessary(traceId); + + if (KafkaState.initialOpened(state)) + { + member.doConsumerInitialWindow(traceId, 0L, 0, 0, 0); + } + + if (KafkaState.replyOpened(state)) + { + member.doConsumerReplyBeginIfNecessary(traceId); + } + } + + private void onConsumerFanoutMemberOpened( + long traceId, + KafkaCacheConsumerStream member) + { + if (!assignedPartitions.isEmpty()) + { + final KafkaDataExFW kafkaDataEx = + kafkaDataExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .consumer(m -> m + .partitions(p -> + assignedPartitions.forEach(ap -> p.item(np -> np.partitionId(ap)))) + .assignments(a -> + assignments.forEach((k, v) -> a.item(na -> na.consumerId(k) + .partitions(pa -> + v.forEach(pi -> pa.item(pai -> pai.partitionId(pi)))))))) + .build(); + member.doConsumerReplyDataIfNecessary(traceId, kafkaDataEx); + } + } + + private void onConsumerFanoutMemberClosed( + long traceId, + KafkaCacheConsumerStream member) + { + members.remove(member); + + if (members.isEmpty()) + { + doConsumerFanoutInitialEndIfNecessary(traceId); + doConsumerFanoutReplyResetIfNecessary(traceId); + } + } + + private void doConsumerFanoutInitialBeginIfNecessary( + long traceId) + { + if (KafkaState.closed(state)) + { + state = 0; + } + + if 
(!KafkaState.initialOpening(state)) + { + doConsumerFanoutInitialBegin(traceId); + } + } + + private void doConsumerFanoutInitialBegin( + long traceId) + { + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.receiver = newStream(this::onConsumerFanoutMessage, + originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, 0L, + ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .consumer(m -> m.groupId(groupId) + .consumerId(consumerId) + .timeout(timeout) + .topic(topic) + .partitionIds(p -> partitions.forEach(tp -> p.item(np -> np.partitionId(tp.intValue()))))) + .build() + .sizeof())); + state = KafkaState.openingInitial(state); + } + + private void doConsumerFanoutInitialEndIfNecessary( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doConsumerFanoutInitialEnd(traceId); + } + } + + private void doConsumerFanoutInitialEnd( + long traceId) + { + doEnd(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + + private void doConsumerFanoutInitialAbortIfNecessary( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doConsumerFanoutInitialAbort(traceId); + } + } + + private void doConsumerFanoutInitialAbort( + long traceId) + { + doAbort(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + + private void onConsumerFanoutInitialReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + final OctetsFW extension = reset.extension(); + + final KafkaResetExFW kafkaResetEx = extension.get(kafkaResetExRO::tryWrap); + final int error = kafkaResetEx != null ? 
kafkaResetEx.error() : -1; + + state = KafkaState.closedInitial(state); + + doConsumerFanoutReplyResetIfNecessary(traceId); + + members.forEach(s -> s.doConsumerInitialResetIfNecessary(traceId)); + } + + private void onConsumerFanoutInitialWindow( + WindowFW window) + { + if (!KafkaState.initialOpened(state)) + { + + final long traceId = window.traceId(); + + state = KafkaState.openedInitial(state); + + members.forEach(s -> s.doConsumerInitialWindow(traceId, 0L, 0, 0, 0)); + } + } + + private void onConsumerFanoutMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onConsumerFanoutReplyBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onConsumerFanoutReplyData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onConsumerFanoutReplyEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onConsumerFanoutReplyAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onConsumerFanoutInitialReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onConsumerFanoutInitialWindow(window); + break; + default: + break; + } + } + + private void onConsumerFanoutReplyBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + state = KafkaState.openingReply(state); + + members.forEach(s -> s.doConsumerReplyBeginIfNecessary(traceId)); + + doConsumerFanoutReplyWindow(traceId, 0, bufferPool.slotCapacity()); + } + + private void onConsumerFanoutReplyData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final int reserved = 
data.reserved(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final KafkaDataExFW kafkaDataEx = dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; + assert kafkaDataEx == null || kafkaDataEx.kind() == KafkaBeginExFW.KIND_CONSUMER; + final KafkaConsumerDataExFW kafkaConsumerDataEx = kafkaDataEx != null ? kafkaDataEx.consumer() : null; + + if (kafkaConsumerDataEx != null) + { + final Array32FW newPartitions = kafkaConsumerDataEx.partitions(); + final Array32FW newAssignments = kafkaConsumerDataEx.assignments(); + + assignedPartitions.clear(); + newPartitions.forEach(p -> this.assignedPartitions.add(p.partitionId())); + + assignments.clear(); + newAssignments.forEach(a -> + { + IntHashSet partitions = new IntHashSet(); + a.partitions().forEach(p -> partitions.add(p.partitionId())); + assignments.put(a.consumerId().asString(), partitions); + }); + + members.forEach(s -> s.doConsumerReplyDataIfNecessary(traceId, kafkaDataEx)); + } + + doConsumerFanoutReplyWindow(traceId, 0, replyMax); + } + + private void onConsumerFanoutReplyEnd( + EndFW end) + { + final long traceId = end.traceId(); + + state = KafkaState.closedReply(state); + + doConsumerFanoutInitialEndIfNecessary(traceId); + + members.forEach(s -> s.doConsumerReplyEndIfNecessary(traceId)); + } + + private void onConsumerFanoutReplyAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedReply(state); + + doConsumerFanoutInitialAbortIfNecessary(traceId); + + members.forEach(s -> s.doConsumerReplyAbortIfNecessary(traceId)); + } + + private void doConsumerFanoutReplyResetIfNecessary( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doConsumerFanoutReplyReset(traceId); + } + } + + private 
void doConsumerFanoutReplyReset( + long traceId) + { + doReset(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization); + + state = KafkaState.closedReply(state); + } + + private void doConsumerFanoutReplyWindow( + long traceId, + int minReplyNoAck, + int minReplyMax) + { + final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck); + + if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state)) + { + replyAck = newReplyAck; + assert replyAck <= replySeq; + + replyMax = minReplyMax; + + state = KafkaState.openedReply(state); + + doWindow(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, 0L, 0); + } + } + } + + private final class KafkaCacheConsumerStream + { + private final KafkaCacheConsumerFanout group; + private final MessageConsumer sender; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final long affinity; + private final long authorization; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private long replyBudgetId; + + KafkaCacheConsumerStream( + KafkaCacheConsumerFanout group, + MessageConsumer sender, + long originId, + long routedId, + long initialId, + long affinity, + long authorization) + { + this.group = group; + this.sender = sender; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + this.authorization = authorization; + } + + private void onConsumerMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + 
onConsumerInitialBegin(begin); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onConsumerInitialEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onConsumerInitialAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onConsumerReplyWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onConsumerReplyReset(reset); + break; + default: + break; + } + } + + private void onConsumerInitialBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + state = KafkaState.openingInitial(state); + + group.onConsumerFanoutMemberOpening(traceId, this); + } + + private void onConsumerInitialEnd( + EndFW end) + { + final long traceId = end.traceId(); + + state = KafkaState.closedInitial(state); + + group.onConsumerFanoutMemberClosed(traceId, this); + + doConsumerReplyEndIfNecessary(traceId); + } + + private void onConsumerInitialAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedInitial(state); + + group.onConsumerFanoutMemberClosed(traceId, this); + + doConsumerReplyAbortIfNecessary(traceId); + } + + private void doConsumerInitialResetIfNecessary( + long traceId) + { + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + { + doConsumerInitialReset(traceId); + } + + state = KafkaState.closedInitial(state); + } + + private void doConsumerInitialReset( + long traceId) + { + state = KafkaState.closedInitial(state); + + doReset(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization); + } + + private void doConsumerInitialWindow( + long traceId, + long budgetId, + int minInitialNoAck, + int minInitialPad, + int minInitialMax) + { + final long newInitialAck = Math.max(initialSeq - minInitialNoAck, initialAck); + + if 
(newInitialAck > initialAck || minInitialMax > initialMax || !KafkaState.initialOpened(state)) + { + initialAck = newInitialAck; + assert initialAck <= initialSeq; + + initialMax = minInitialMax; + + state = KafkaState.openedInitial(state); + + doWindow(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, minInitialPad); + } + } + + private void doConsumerReplyBeginIfNecessary( + long traceId) + { + if (!KafkaState.replyOpening(state)) + { + doConsumerReplyBegin(traceId); + } + } + + private void doConsumerReplyBegin( + long traceId) + { + state = KafkaState.openingReply(state); + + doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, EMPTY_EXTENSION); + } + + private void doConsumerReplyDataIfNecessary( + long traceId, + Flyweight extension) + { + if (KafkaState.replyOpened(state)) + { + doConsumerReplyData(traceId, extension); + } + } + + private void doConsumerReplyData( + long traceId, + Flyweight extension) + { + final int reserved = replyPad; + + doDataNull(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, reserved, extension); + + replySeq += reserved; + } + + private void doConsumerReplyEndIfNecessary( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doConsumerReplyEnd(traceId); + } + + state = KafkaState.closedReply(state); + } + + private void doConsumerReplyEnd( + long traceId) + { + state = KafkaState.closedReply(state); + doEnd(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + private void doConsumerReplyAbortIfNecessary( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doConsumerReplyAbort(traceId); + } + + state = KafkaState.closedReply(state); + } + + private void doConsumerReplyAbort( + long traceId) + { + state = 
KafkaState.closedReply(state); + doAbort(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + private void onConsumerReplyReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + group.onConsumerFanoutMemberClosed(traceId, this); + + doConsumerInitialResetIfNecessary(traceId); + } + + private void onConsumerReplyWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + this.replyAck = acknowledge; + this.replyMax = maximum; + this.replyPad = padding; + this.replyBudgetId = budgetId; + + assert replyAck <= replySeq; + + if (!KafkaState.replyOpened(state)) + { + state = KafkaState.openedReply(state); + + final long traceId = window.traceId(); + group.onConsumerFanoutMemberOpened(traceId, this); + } + } + } +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java index e4e4ceb6a9..63ba697551 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java @@ -153,7 +153,7 @@ private MessageConsumer newStream( long traceId, long authorization, long affinity, - Consumer extension) + OctetsFW extension) { final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) .originId(originId) @@ -313,7 +313,7 @@ private void doFlush( 
long authorization, long budgetId, int reserved, - Consumer extension) + OctetsFW extension) { final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) .originId(originId) @@ -477,7 +477,8 @@ private KafkaCacheGroupNet( } private void doGroupInitialBegin( - long traceId) + long traceId, + OctetsFW extension) { if (KafkaState.closed(state)) { @@ -497,14 +498,7 @@ private void doGroupInitialBegin( this.replyId = supplyReplyId.applyAsLong(initialId); this.receiver = newStream(this::onGroupMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, 0L, - ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l) - .typeId(kafkaTypeId) - .group(g -> g.groupId(delegate.groupId) - .protocol(delegate.protocol) - .timeout(delegate.timeout)) - .build() - .sizeof())); + traceId, authorization, 0L, extension); state = KafkaState.openingInitial(state); } } @@ -527,10 +521,11 @@ private void doGroupInitialData( } private void doGroupInitialFlush( - long traceId) + long traceId, + OctetsFW extension) { doFlush(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, initialBud, 0, EMPTY_EXTENSION); + traceId, authorization, initialBud, 0, extension); } private void doGroupInitialEnd( @@ -620,6 +615,10 @@ private void onGroupMessage( final DataFW data = dataRO.wrap(buffer, index, index + length); onGroupReplyData(data); break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onGroupReplyFlush(flush); + break; case EndFW.TYPE_ID: final EndFW end = endRO.wrap(buffer, index, index + length); onGroupReplyEnd(end); @@ -673,6 +672,26 @@ private void onGroupReplyData( delegate.doGroupReplyData(traceId, flags, reserved, payload, extension); } + private void onGroupReplyFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final int 
reserved = flush.reserved(); + final OctetsFW extension = flush.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + delegate.doGroupReplyFlush(traceId, extension); + } + private void onGroupReplyEnd( EndFW end) { @@ -838,6 +857,7 @@ private void onGroupInitialBegin( final long traceId = begin.traceId(); final long authorization = begin.authorization(); final long affinity = begin.affinity(); + final OctetsFW extension = begin.extension(); assert acknowledge <= sequence; assert sequence >= initialSeq; @@ -849,7 +869,7 @@ private void onGroupInitialBegin( assert initialAck <= initialSeq; - group.doGroupInitialBegin(traceId); + group.doGroupInitialBegin(traceId, extension); } private void onGroupInitialData( @@ -899,6 +919,7 @@ private void onGroupInitialFlush( final long sequence = flush.sequence(); final long acknowledge = flush.acknowledge(); final long traceId = flush.traceId(); + final OctetsFW extension = flush.extension(); assert acknowledge <= sequence; assert sequence >= initialSeq; @@ -908,7 +929,7 @@ private void onGroupInitialFlush( assert initialAck <= initialSeq; - group.doGroupInitialFlush(traceId); + group.doGroupInitialFlush(traceId, extension); } private void onGroupInitialAbort( @@ -980,6 +1001,14 @@ private void doGroupReplyData( replySeq += reserved; } + private void doGroupReplyFlush( + long traceId, + OctetsFW extension) + { + doFlush(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, 0, extension); + } + private void doGroupReplyEnd( long traceId) { diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetFetchFactory.java new file mode 100644 index 
0000000000..625dd927dd --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetFetchFactory.java @@ -0,0 +1,1046 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.LongUnaryOperator; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; 
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.FlushFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaOffsetFetchBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; + + +public final class KafkaCacheOffsetFetchFactory implements BindingHandler +{ + private static final Consumer EMPTY_EXTENSION = ex -> {}; + + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final FlushFW flushRO = new FlushFW(); + private final AbortFW abortRO = new AbortFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new WindowFW(); + private final ExtensionFW extensionRO = new ExtensionFW(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final FlushFW.Builder flushRW = new FlushFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); + + private final int kafkaTypeId; + private final MutableDirectBuffer writeBuffer; + private final BufferPool bufferPool; + private final BindingHandler streamFactory; + 
private final LongUnaryOperator supplyInitialId; + private final LongUnaryOperator supplyReplyId; + private final LongFunction supplyBinding; + + public KafkaCacheOffsetFetchFactory( + KafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding) + { + this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.bufferPool = context.bufferPool(); + this.streamFactory = context.streamFactory(); + this.supplyInitialId = context::supplyInitialId; + this.supplyReplyId = context::supplyReplyId; + this.supplyBinding = supplyBinding; + } + + @Override + public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer sender) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert (initialId & 0x0000_0000_0000_0001L) != 0L; + + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extension.get(extensionRO::tryWrap); + assert beginEx != null && beginEx.typeId() == kafkaTypeId; + final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap); + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_OFFSET_FETCH; + final KafkaOffsetFetchBeginExFW kafkaOffsetFetchBeginEx = kafkaBeginEx.offsetFetch(); + final String groupId = kafkaOffsetFetchBeginEx.groupId().asString(); + + MessageConsumer newStream = null; + + final KafkaBindingConfig binding = supplyBinding.apply(routedId); + final KafkaRouteConfig resolved = binding != null ? 
binding.resolve(authorization, null, groupId) : null; + + if (resolved != null) + { + final long resolvedId = resolved.id; + + newStream = new KafkaCacheOffsetFetchApp( + sender, + originId, + routedId, + initialId, + affinity, + authorization, + resolvedId)::onOffsetFetchMessage; + } + + return newStream; + } + + private MessageConsumer newStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + OctetsFW extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + final MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + 
Flyweight extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int flags, + int reserved, + OctetsFW payload, + Flyweight extension) + { + final DataFW frame = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); + } + + + private void doDataNull( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(data.typeId(), data.buffer(), 
data.offset(), data.sizeof()); + } + + private void doFlush( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Consumer extension) + { + final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension) + .build(); + + receiver.accept(flush.typeId(), flush.buffer(), flush.offset(), flush.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doWindow( + MessageConsumer sender, + long originId, 
+ long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + { + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + final class KafkaCacheOffsetFetchNet + { + private final long originId; + private final long routedId; + private final long authorization; + private final KafkaCacheOffsetFetchApp delegate; + + private long initialId; + private long replyId; + private MessageConsumer receiver; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + private long initialBud; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private KafkaCacheOffsetFetchNet( + KafkaCacheOffsetFetchApp delegate, + long originId, + long routedId, + long authorization) + { + this.delegate = delegate; + this.originId = originId; + this.routedId = routedId; + this.receiver = MessageConsumer.NOOP; + this.authorization = authorization; + } + + private void doOffsetFetchInitialBegin( + long 
traceId, + long affinity, + OctetsFW extension) + { + if (KafkaState.closed(state)) + { + state = 0; + } + + if (!KafkaState.initialOpening(state)) + { + + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.receiver = newStream(this::onOffsetFetchMessage, + originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, this.authorization, affinity, extension); + state = KafkaState.openingInitial(state); + } + } + + private void doOffsetFetchInitialData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + OctetsFW payload, + Flyweight extension) + { + doData(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, payload, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doOffsetFetchInitialFlush( + long traceId) + { + doFlush(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, initialBud, 0, EMPTY_EXTENSION); + } + + private void doOffsetFetchInitialEnd( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doEnd(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void doOffsetFetchInitialAbort( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doAbort(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void onOffsetFetchInitialReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= 
delegate.initialAck; + + delegate.initialAck = acknowledge; + state = KafkaState.closedInitial(state); + + assert delegate.initialAck <= delegate.initialSeq; + + delegate.doOffsetFetchInitialReset(traceId); + + doOffsetFetchReplyReset(traceId); + } + + + private void onOffsetFetchInitialWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert acknowledge >= delegate.initialAck; + assert maximum >= delegate.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + initialBud = budgetId; + state = KafkaState.openedInitial(state); + + assert initialAck <= initialSeq; + + delegate.doOffsetFetchInitialWindow(authorization, traceId, budgetId, padding); + } + + private void onOffsetFetchMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onOffsetFetchReplyBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onOffsetFetchReplyData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onOffsetFetchReplyEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onOffsetFetchReplyAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onOffsetFetchInitialReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onOffsetFetchInitialWindow(window); + break; + 
default: + break; + } + } + + private void onOffsetFetchReplyBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + state = KafkaState.openingReply(state); + + delegate.doOffsetFetchReplyBegin(traceId, begin.extension()); + } + + private void onOffsetFetchReplyData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final int flags = data.flags(); + final int reserved = data.reserved(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + delegate.doOffsetFetchReplyData(traceId, flags, reserved, payload, extension); + } + + private void onOffsetFetchReplyEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + delegate.doOffsetFetchReplyEnd(traceId); + } + + private void onOffsetFetchReplyAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + delegate.doOffsetFetchReplyAbort(traceId); + } + + private void doOffsetFetchReplyReset( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization); + + state = KafkaState.closedReply(state); + } + } + + private void doOffsetFetchReplyWindow( + long traceId, + long 
authorization, + long budgetId, + int padding) + { + replyAck = Math.max(delegate.replyAck - replyPad, 0); + replyMax = delegate.replyMax; + + doWindow(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding + replyPad); + } + } + + private final class KafkaCacheOffsetFetchApp + { + private final KafkaCacheOffsetFetchNet offsetFetch; + private final MessageConsumer sender; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final long affinity; + private final long authorization; + + private int state; + + private long replyBudgetId; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + private long replyBud; + private int replyCap; + + KafkaCacheOffsetFetchApp( + MessageConsumer sender, + long originId, + long routedId, + long initialId, + long affinity, + long authorization, + long resolvedId) + { + this.offsetFetch = new KafkaCacheOffsetFetchNet(this, routedId, resolvedId, authorization); + this.sender = sender; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + this.authorization = authorization; + } + + private void onOffsetFetchMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onOffsetFetchInitialBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onOffsetFetchInitialData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onOffsetFetchInitialEnd(end); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = 
flushRO.wrap(buffer, index, index + length); + onOffsetFetchInitialFlush(flush); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onOffsetFetchInitialAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onOffsetFetchReplyWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onOffsetFetchReplyReset(reset); + break; + default: + break; + } + } + + private void onOffsetFetchInitialBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + final OctetsFW extension = begin.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + initialAck = acknowledge; + state = KafkaState.openingInitial(state); + + assert initialAck <= initialSeq; + + offsetFetch.doOffsetFetchInitialBegin(traceId, affinity, extension); + } + + private void onOffsetFetchInitialData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + final int flags = data.flags(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + + assert initialAck <= initialSeq; + + offsetFetch.doOffsetFetchInitialData(traceId, authorization, budgetId, reserved, flags, payload, extension); + } + + private void onOffsetFetchInitialEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long 
acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + offsetFetch.doOffsetFetchInitialEnd(traceId); + } + + private void onOffsetFetchInitialFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + offsetFetch.doOffsetFetchInitialFlush(traceId); + } + + private void onOffsetFetchInitialAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + offsetFetch.doOffsetFetchInitialAbort(traceId); + } + + private void doOffsetFetchInitialReset( + long traceId) + { + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + { + state = KafkaState.closedInitial(state); + + doReset(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization); + } + + state = KafkaState.closedInitial(state); + } + + private void doOffsetFetchInitialWindow( + long authorization, + long traceId, + long budgetId, + int padding) + { + initialAck = offsetFetch.initialAck; + initialMax = offsetFetch.initialMax; + + doWindow(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, padding); + } + + private void doOffsetFetchReplyBegin( + long traceId, + OctetsFW extension) + { + state = KafkaState.openingReply(state); + + doBegin(sender, originId, 
routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, extension); + } + + private void doOffsetFetchReplyData( + long traceId, + int flag, + int reserved, + OctetsFW payload, + Flyweight extension) + { + + doData(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, flag, reserved, payload, extension); + + replySeq += reserved; + } + + private void doOffsetFetchReplyEnd( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doEnd(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void doOffsetFetchReplyAbort( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doAbort(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void onOffsetFetchReplyReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final int maximum = reset.maximum(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + cleanup(traceId); + } + + private void onOffsetFetchReplyWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert 
acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + replyBud = budgetId; + replyPad = padding; + replyCap = capabilities; + state = KafkaState.openedReply(state); + + assert replyAck <= replySeq; + + offsetFetch.doOffsetFetchReplyWindow(traceId, acknowledge, budgetId, padding); + } + + private void cleanup( + long traceId) + { + doOffsetFetchInitialReset(traceId); + doOffsetFetchReplyAbort(traceId); + + offsetFetch.doOffsetFetchInitialAbort(traceId); + offsetFetch.doOffsetFetchReplyReset(traceId); + } + } +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java index 1d6ea15226..72c431cf91 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java @@ -69,6 +69,11 @@ public KafkaCacheServerFactory( final KafkaCacheGroupFactory cacheGroupFactory = new KafkaCacheGroupFactory(config, context, bindings::get); + final KafkaCacheConsumerFactory consumerGroupFactory = new KafkaCacheConsumerFactory(config, context, bindings::get); + + final KafkaCacheOffsetFetchFactory cacheOffsetFetchFactory = + new KafkaCacheOffsetFetchFactory(config, context, bindings::get); + final KafkaCacheServerFetchFactory cacheFetchFactory = new KafkaCacheServerFetchFactory( config, context, bindings::get, supplyCache, supplyCacheRoute); @@ -79,6 +84,8 @@ public KafkaCacheServerFactory( factories.put(KafkaBeginExFW.KIND_META, cacheMetaFactory); factories.put(KafkaBeginExFW.KIND_DESCRIBE, cacheDescribeFactory); factories.put(KafkaBeginExFW.KIND_GROUP, cacheGroupFactory); + factories.put(KafkaBeginExFW.KIND_CONSUMER, consumerGroupFactory); + 
factories.put(KafkaBeginExFW.KIND_OFFSET_FETCH, cacheOffsetFetchFactory); factories.put(KafkaBeginExFW.KIND_FETCH, cacheFetchFactory); factories.put(KafkaBeginExFW.KIND_PRODUCE, cacheProduceFactory); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConsumerFactory.java new file mode 100644 index 0000000000..7e81172693 --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConsumerFactory.java @@ -0,0 +1,1322 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.LongUnaryOperator; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.collections.IntHashSet; +import org.agrona.collections.Object2ObjectHashMap; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Array32FW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.MemberAssignmentFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.TopicAssignmentFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.FlushFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; +import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFlushExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupFlushExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupMemberMetadataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupTopicMetadataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; + +public final class KafkaClientConsumerFactory implements BindingHandler +{ + private static final Consumer EMPTY_EXTENSION = ex -> {}; + private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer(); + private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0); + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final FlushFW flushRO = new FlushFW(); + private final AbortFW abortRO = new AbortFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new WindowFW(); + private final ExtensionFW extensionRO = new ExtensionFW(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + private final KafkaFlushExFW kafkaFlushExRO = new KafkaFlushExFW(); + private final KafkaGroupMemberMetadataFW kafkaGroupMemberMetadataRO = new KafkaGroupMemberMetadataFW(); + private final Array32FW groupTopicsMetadataRO = + new Array32FW<>(new KafkaGroupTopicMetadataFW()); + private final Array32FW topicAssignmentsRO = + new Array32FW<>(new TopicAssignmentFW()); + + private final Array32FW.Builder memberAssignmentRW = + new 
Array32FW.Builder<>(new MemberAssignmentFW.Builder(), new MemberAssignmentFW()); + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final FlushFW.Builder flushRW = new FlushFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); + private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); + private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder(); + private final KafkaGroupMemberMetadataFW.Builder kafkaGroupMemberMetadataRW = new KafkaGroupMemberMetadataFW.Builder(); + + private final int kafkaTypeId; + private final MutableDirectBuffer writeBuffer; + private final MutableDirectBuffer extBuffer; + private final BufferPool bufferPool; + private final BindingHandler streamFactory; + private final LongUnaryOperator supplyInitialId; + private final LongUnaryOperator supplyReplyId; + private final LongFunction supplyBinding; + private final Object2ObjectHashMap clientConsumerFansByGroupId; + + public KafkaClientConsumerFactory( + KafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding) + { + this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.bufferPool = context.bufferPool(); + this.streamFactory = context.streamFactory(); + this.supplyInitialId = context::supplyInitialId; + this.supplyReplyId = context::supplyReplyId; + this.supplyBinding = supplyBinding; + this.clientConsumerFansByGroupId = new Object2ObjectHashMap<>(); + } + + @Override + 
public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer sender) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert (initialId & 0x0000_0000_0000_0001L) != 0L; + + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extension.get(extensionRO::tryWrap); + assert beginEx != null && beginEx.typeId() == kafkaTypeId; + final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap); + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_CONSUMER; + final KafkaConsumerBeginExFW kafkaConsumerBeginEx = kafkaBeginEx.consumer(); + final String groupId = kafkaConsumerBeginEx.groupId().asString(); + final String topic = kafkaConsumerBeginEx.topic().asString(); + final String consumerId = kafkaConsumerBeginEx.consumerId().asString(); + final int timeout = kafkaConsumerBeginEx.timeout(); + final List partitions = new ArrayList<>(); + kafkaConsumerBeginEx.partitionIds().forEach(p -> partitions.add(p.partitionId())); + + MessageConsumer newStream = null; + + final KafkaBindingConfig binding = supplyBinding.apply(routedId); + final KafkaRouteConfig resolved = binding != null ? 
binding.resolve(authorization, topic, groupId) : null; + + if (resolved != null) + { + final long resolvedId = resolved.id; + + KafkaClientConsumerFanout fanout = clientConsumerFansByGroupId.get(groupId); + + if (fanout == null) + { + KafkaClientConsumerFanout newFanout = + new KafkaClientConsumerFanout(routedId, resolvedId, authorization, consumerId, groupId, timeout); + fanout = newFanout; + clientConsumerFansByGroupId.put(groupId, fanout); + } + + newStream = new KafkaClientConsumerStream( + fanout, + sender, + originId, + routedId, + initialId, + affinity, + authorization, + topic, + partitions)::onConsumerMessage; + } + + return newStream; + } + + private MessageConsumer newStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + final MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) 
+ .extension(extension) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Flyweight extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int flags, + int reserved, + DirectBuffer buffer, + int offset, + int limit, + Flyweight extension) + { + final DataFW frame = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(buffer, offset, limit) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int flags, + int reserved, + OctetsFW payload, + Consumer extension) + { + final DataFW frame = 
dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload) + .extension(extension) + .build(); + + receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); + } + + private void doDataNull( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doFlush( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Consumer extension) + { + final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension) + .build(); + + receiver.accept(flush.typeId(), flush.buffer(), flush.offset(), flush.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long 
acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doWindow( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + { + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + 
.originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + final class KafkaClientConsumerFanout + { + private final String consumerId; + private final String groupId; + private final long originId; + private final long routedId; + private final long authorization; + private final int timeout; + private final List streams; + private final Object2ObjectHashMap members; + private final Object2ObjectHashMap partitionsByTopic; + private final Object2ObjectHashMap> assignment; + + private long initialId; + private long replyId; + private MessageConsumer receiver; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + private long initialBud; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + private String leaderId; + private String memberId; + + + private KafkaClientConsumerFanout( + long originId, + long routedId, + long authorization, + String consumerId, + String groupId, + int timeout) + { + this.originId = originId; + this.routedId = routedId; + this.authorization = authorization; + this.consumerId = consumerId; + this.groupId = groupId; + this.timeout = timeout; + this.streams = new ArrayList<>(); + this.members = new Object2ObjectHashMap<>(); + this.partitionsByTopic = new Object2ObjectHashMap<>(); + this.assignment = new Object2ObjectHashMap<>(); + } + + private void doConsumerInitialBegin( + long traceId) + { + if (KafkaState.closed(state)) + { + state = 0; + } + + if (!KafkaState.initialOpening(state)) + { + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + + KafkaGroupMemberMetadataFW metadata = kafkaGroupMemberMetadataRW + .wrap(extBuffer, 0, extBuffer.capacity()) + 
.consumerId(consumerId) + .topics(t -> streams.forEach(s -> t.item(tp -> tp + .topic(s.topic) + .partitions(p -> s.partitions.forEach(sp -> + p.item(gtp -> gtp.partitionId(sp))))))) + .build(); + + this.receiver = newStream(this::onConsumerMessage, + originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, 0L, + ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .group(g -> + g.groupId(groupId) + .protocol("highlander") + .timeout(timeout) + .metadataLen(metadata.sizeof()) + .metadata(metadata.buffer(), 0, metadata.sizeof())) + .build().sizeof())); + state = KafkaState.openingInitial(state); + } + } + + private void doConsumerInitialData( + long traceId, + long authorization, + long budgetId, + int reserved, + int flags, + DirectBuffer buffer, + int offset, + int limit, + Flyweight extension) + { + doData(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, flags, reserved, buffer, offset, limit, extension); + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doConsumerInitialFlush( + long traceId, + Consumer extension) + { + doFlush(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, initialBud, 0, extension); + } + + private void doConsumerInitialEnd( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doEnd(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void doConsumerInitialAbort( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doAbort(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void onConsumerInitialReset( + ResetFW reset) + { + final 
long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert acknowledge >= this.initialAck; + + this.initialAck = acknowledge; + state = KafkaState.closedInitial(state); + + assert this.initialAck <= this.initialSeq; + + streams.forEach(m -> m.doConsumerInitialReset(traceId)); + + doConsumerReplyReset(traceId); + } + + + private void onConsumerInitialWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long authorization = window.authorization(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert acknowledge >= this.initialAck; + assert maximum >= this.initialMax; + + initialAck = acknowledge; + initialMax = maximum; + initialBud = budgetId; + state = KafkaState.openedInitial(state); + + assert initialAck <= initialSeq; + + streams.forEach(m -> m.doConsumerInitialWindow(authorization, traceId, budgetId, padding)); + } + + private void onConsumerMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onConsumerReplyBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onConsumerReplyData(data); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onConsumerReplyFlush(flush); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onConsumerReplyEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onConsumerReplyAbort(abort); + 
break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onConsumerInitialReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onConsumerInitialWindow(window); + break; + default: + break; + } + } + + private void onConsumerReplyBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + state = KafkaState.openingReply(state); + + streams.forEach(m -> m.doConsumerReplyBegin(traceId, begin.extension())); + } + + private void onConsumerReplyFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + final long authorizationId = flush.authorization(); + final int reserved = flush.reserved(); + final OctetsFW extension = flush.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + replyAck = replySeq; + + assert replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + final KafkaFlushExFW flushEx = extension.get(kafkaFlushExRO::tryWrap); + + if (flushEx != null) + { + KafkaGroupFlushExFW kafkaGroupFlushEx = flushEx.group(); + + leaderId = kafkaGroupFlushEx.leaderId().asString(); + memberId = kafkaGroupFlushEx.memberId().asString(); + + partitionsByTopic.clear(); + members.clear(); + + kafkaGroupFlushEx.members().forEach(m -> + { + final OctetsFW metadata = m.metadata(); + final KafkaGroupMemberMetadataFW groupMetadata = kafkaGroupMemberMetadataRO + .wrap(metadata.buffer(), metadata.offset(), metadata.limit()); + final String consumerId = kafkaGroupMemberMetadataRO.consumerId().asString(); + + groupMetadata.topics().forEach(mt -> + { + final String mId = m.id().asString(); + members.put(mId, consumerId); + + final String topic = mt.topic().asString(); + IntHashSet partitions = partitionsByTopic.computeIfAbsent(topic, s -> new IntHashSet()); + mt.partitions().forEach(p -> 
partitions.add(p.partitionId())); + }); + + }); + } + + doPartitionAssignment(traceId, authorization); + } + + private void onConsumerReplyData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long authorizationId = data.authorization(); + final int flags = data.flags(); + final int reserved = data.reserved(); + final OctetsFW payload = data.payload(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + Array32FW topicAssignments = topicAssignmentsRO + .wrap(payload.buffer(), payload.offset(), payload.limit()); + + topicAssignments.forEach(ta -> + { + KafkaClientConsumerStream stream = + streams.stream().filter(s -> s.topic.equals(ta.topic().asString())).findFirst().get(); + + stream.doConsumerReplyData(traceId, flags, replyPad, EMPTY_OCTETS, + ex -> ex.set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .consumer(c -> c.partitions(p -> ta + .partitions() + .forEach(np -> p.item(tp -> tp.partitionId(np.partitionId())))) + .assignments(a -> ta.userdata().forEach(u -> + a.item(ua -> ua.consumerId(u.consumerId()).partitions(p -> u.partitions() + .forEach(np -> p.item(tp -> tp.partitionId(np.partitionId())))))))) + .build() + .sizeof())); + }); + } + + private void onConsumerReplyEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + streams.forEach(s -> s.doConsumerReplyEnd(traceId)); + doConsumerInitialEnd(traceId); + } + + private void onConsumerReplyAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = 
abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + streams.forEach(s -> s.cleanup(traceId)); + + doConsumerInitialAbort(traceId); + } + + private void doConsumerReplyReset( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization); + + state = KafkaState.closedReply(state); + } + } + + private void doConsumerReplyWindow( + long traceId, + long authorization, + long budgetId, + int padding) + { + replyAck = Math.max(replyAck - replyPad, 0); + + doWindow(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, padding + replyPad); + } + + private void doPartitionAssignment( + long traceId, + long authorization) + { + if (memberId.equals(leaderId)) + { + int memberSize = members.size(); + partitionsByTopic.forEach((t, p) -> + { + final int partitionSize = p.size(); + final int numberOfPartitionsPerMember = partitionSize / memberSize; + final int extraPartition = partitionSize % memberSize; + + int partitionIndex = 0; + int newPartitionPerTopic = numberOfPartitionsPerMember + extraPartition; + + for (String member : members.keySet()) + { + String consumerId = members.get(member); + List topicPartitions = assignment.computeIfAbsent( + member, tp -> new ArrayList<>()); + List partitions = new ArrayList<>(); + + for (; partitionIndex < newPartitionPerTopic; partitionIndex++) + { + partitions.add(p.iterator().next()); + } + topicPartitions.add(new TopicPartition(consumerId, t, partitions)); + + newPartitionPerTopic += numberOfPartitionsPerMember; + } + }); + } + + doMemberAssigment(traceId, authorization); + } + + private void doMemberAssigment( + long traceId, + long authorization) + { + if (!assignment.isEmpty()) + { + Array32FW assignment 
= memberAssignmentRW + .wrap(writeBuffer, DataFW.FIELD_OFFSET_PAYLOAD, writeBuffer.capacity()) + .item(ma -> this.assignment.forEach((k, v) -> + ma.memberId(k) + .assignments(ta -> v.forEach(tp -> ta.item(i -> + i.topic(tp.topic) + .partitions(p -> tp.partitions.forEach(t -> p.item(tpa -> tpa.partitionId(t)))) + .userdata(u -> + this.assignment.forEach((ak, av) -> + av.stream().filter(atp -> atp.topic.equals(tp.topic)).forEach(at -> + u.item(ud -> ud + .consumerId(at.consumerId) + .partitions(pt -> at.partitions.forEach(up -> + pt.item(pi -> pi.partitionId(up)))))))) + ))))) + .build(); + + doConsumerInitialData(traceId, authorization, initialBud, memberAssignmentRW.sizeof(), 3, + assignment.buffer(), assignment.offset(), assignment.sizeof(), EMPTY_OCTETS); + } + else + { + doConsumerInitialData(traceId, authorization, initialBud, memberAssignmentRW.sizeof(), 3, + EMPTY_OCTETS.buffer(), EMPTY_OCTETS.offset(), EMPTY_OCTETS.sizeof(), EMPTY_OCTETS); + } + } + } + + final class KafkaClientConsumerStream + { + private final KafkaClientConsumerFanout fanout; + private final MessageConsumer sender; + private final String topic; + private final List partitions; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final long affinity; + private final long authorization; + + private int state; + + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + private long replyBud; + private int replyCap; + + KafkaClientConsumerStream( + KafkaClientConsumerFanout fanout, + MessageConsumer sender, + long originId, + long routedId, + long initialId, + long affinity, + long authorization, + String topic, + List partitions) + { + this.fanout = fanout; + this.sender = sender; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = 
supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + this.authorization = authorization; + this.topic = topic; + this.partitions = partitions; + } + + private void onConsumerMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onConsumerInitialBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onConsumerInitialData(data); + break; + case FlushFW.TYPE_ID: + final FlushFW flush = flushRO.wrap(buffer, index, index + length); + onConsumerInitialFlush(flush); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onConsumerInitialEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onConsumerInitialAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onConsumerReplyWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onConsumerReplyReset(reset); + break; + default: + break; + } + } + + private void onConsumerInitialBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + initialAck = acknowledge; + state = KafkaState.openingInitial(state); + + assert initialAck <= initialSeq; + + fanout.streams.add(this); + + fanout.doConsumerInitialBegin(traceId); + } + + private void onConsumerInitialData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = 
data.acknowledge(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + final int flags = data.flags(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + + assert initialAck <= initialSeq; + } + + private void onConsumerInitialEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + } + + private void onConsumerInitialFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + final long traceId = flush.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + } + + private void onConsumerInitialAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = KafkaState.closedInitial(state); + + assert initialAck <= initialSeq; + + doConsumerReplyAbort(traceId); + fanout.streams.remove(this); + } + + private void doConsumerInitialReset( + long traceId) + { + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + { + state = KafkaState.closedInitial(state); + + doReset(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization); + } + + state = KafkaState.closedInitial(state); + } + + private void 
doConsumerInitialWindow( + long authorization, + long traceId, + long budgetId, + int padding) + { + doWindow(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, padding); + } + + private void doConsumerReplyBegin( + long traceId, + OctetsFW extension) + { + state = KafkaState.openingReply(state); + + doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, extension); + } + + private void doConsumerReplyData( + long traceId, + int flag, + int reserved, + OctetsFW payload, + Consumer extension) + { + doData(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBud, flag, reserved, payload, extension); + + replySeq += reserved; + } + + private void doConsumerReplyEnd( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doEnd(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void doConsumerReplyAbort( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doAbort(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_EXTENSION); + } + + state = KafkaState.closedReply(state); + } + + private void onConsumerReplyReset( + ResetFW reset) + { + final long sequence = reset.sequence(); + final long acknowledge = reset.acknowledge(); + final int maximum = reset.maximum(); + final long traceId = reset.traceId(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + cleanup(traceId); + } + + private void onConsumerReplyWindow( + WindowFW window) + { + final long 
sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long authorizationId = window.authorization(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int capabilities = window.capabilities(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + replyBud = budgetId; + replyPad = padding; + replyCap = capabilities; + state = KafkaState.openedReply(state); + + assert replyAck <= replySeq; + + fanout.replyMax = replyMax; + fanout.doConsumerReplyWindow(traceId, authorizationId, budgetId, padding); + } + + private void cleanup( + long traceId) + { + doConsumerInitialReset(traceId); + doConsumerReplyAbort(traceId); + } + } + + final class TopicPartition + { + private final String consumerId; + private final String topic; + private final List partitions; + + TopicPartition( + String consumerId, + String topic, + List partitions) + { + this.consumerId = consumerId; + this.topic = topic; + this.partitions = partitions; + } + } +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java index 10adeb7c4a..289ebff849 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java @@ -61,12 +61,18 @@ public KafkaClientFactory( final KafkaClientGroupFactory clientGroupFactory = new KafkaClientGroupFactory( config, context, bindings::get, accountant::supplyDebitor); + final KafkaClientConsumerFactory clientConsumerFactory = new 
KafkaClientConsumerFactory( + config, context, bindings::get); + final KafkaClientFetchFactory clientFetchFactory = new KafkaClientFetchFactory( config, context, bindings::get, accountant::supplyDebitor, supplyClientRoute); final KafkaClientProduceFactory clientProduceFactory = new KafkaClientProduceFactory( config, context, bindings::get, supplyClientRoute); + final KafkaClientOffsetFetchFactory clientOffsetFetchFactory = new KafkaClientOffsetFetchFactory( + config, context, bindings::get, supplyClientRoute); + final KafkaMergedFactory clientMergedFactory = new KafkaMergedFactory( config, context, bindings::get, accountant.creditor()); @@ -74,8 +80,10 @@ public KafkaClientFactory( factories.put(KafkaBeginExFW.KIND_META, clientMetaFactory); factories.put(KafkaBeginExFW.KIND_DESCRIBE, clientDescribeFactory); factories.put(KafkaBeginExFW.KIND_GROUP, clientGroupFactory); + factories.put(KafkaBeginExFW.KIND_CONSUMER, clientConsumerFactory); factories.put(KafkaBeginExFW.KIND_FETCH, clientFetchFactory); factories.put(KafkaBeginExFW.KIND_PRODUCE, clientProduceFactory); + factories.put(KafkaBeginExFW.KIND_OFFSET_FETCH, clientOffsetFetchFactory); factories.put(KafkaBeginExFW.KIND_MERGED, clientMergedFactory); this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index aba4326b47..8cfd2ae67c 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -19,9 +19,12 @@ import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT; import static 
io.aklivity.zilla.runtime.engine.concurrent.Signaler.NO_CANCEL_ID; import static java.lang.System.currentTimeMillis; +import static java.nio.charset.StandardCharsets.UTF_8; +import java.nio.ByteOrder; import java.time.Duration; import java.util.ArrayList; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.function.Consumer; @@ -32,6 +35,7 @@ import org.agrona.MutableDirectBuffer; import org.agrona.collections.Long2ObjectHashMap; import org.agrona.collections.LongLongConsumer; +import org.agrona.collections.MutableInteger; import org.agrona.collections.Object2ObjectHashMap; import org.agrona.concurrent.UnsafeBuffer; @@ -40,11 +44,15 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.RequestHeaderFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.ResponseHeaderFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.ConfigResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.DescribeConfigsRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.DescribeConfigsResponseFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.ResourceRequestFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.ResourceResponseFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.AssignmentFW; @@ -61,6 +69,8 @@ import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.ProtocolMetadataFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.SyncGroupRequestFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.SyncGroupResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.MemberAssignmentFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.TopicAssignmentFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; @@ -69,7 +79,9 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.FlushFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupMemberFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ProxyBeginExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; @@ -86,12 +98,16 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp { private static final short ERROR_EXISTS = -1; private static final short ERROR_NONE = 0; + private static final short ERROR_COORDINATOR_NOT_AVAILABLE = 15; private static final short ERROR_NOT_COORDINATOR_FOR_CONSUMER = 16; private static final short ERROR_UNKNOWN_MEMBER = 25; private static final short ERROR_MEMBER_ID_REQUIRED = 79; private static final short ERROR_REBALANCE_IN_PROGRESS = 27; private static final short 
SIGNAL_NEXT_REQUEST = 1; + private static final short DESCRIBE_CONFIGS_API_KEY = 32; + private static final short DESCRIBE_CONFIGS_API_VERSION = 0; + private static final byte RESOURCE_TYPE_BROKER = 1; private static final short FIND_COORDINATOR_API_KEY = 10; private static final short FIND_COORDINATOR_API_VERSION = 1; private static final short JOIN_GROUP_API_KEY = 11; @@ -105,6 +121,8 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private static final String UNKNOWN_MEMBER_ID = ""; private static final String HIGHLANDER_PROTOCOL = "highlander"; + private static final String GROUP_MIN_SESSION_TIMEOUT = "group.min.session.timeout.ms"; + private static final String GROUP_MAX_SESSION_TIMEOUT = "group.max.session.timeout.ms"; private static final byte GROUP_KEY_TYPE = 0x00; private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer(); private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0); @@ -123,16 +141,21 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final BeginFW.Builder beginRW = new BeginFW.Builder(); private final DataFW.Builder dataRW = new DataFW.Builder(); + private final FlushFW.Builder flushRW = new FlushFW.Builder(); private final EndFW.Builder endRW = new EndFW.Builder(); private final AbortFW.Builder abortRW = new AbortFW.Builder(); private final ResetFW.Builder resetRW = new ResetFW.Builder(); private final WindowFW.Builder windowRW = new WindowFW.Builder(); private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); + private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder(); private final KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder(); private final ProxyBeginExFW.Builder proxyBeginExRW = new ProxyBeginExFW.Builder(); private final RequestHeaderFW.Builder requestHeaderRW = new 
RequestHeaderFW.Builder(); + private final DescribeConfigsRequestFW.Builder describeConfigsRequestRW = new DescribeConfigsRequestFW.Builder(); + private final ResourceRequestFW.Builder resourceRequestRW = new ResourceRequestFW.Builder(); + private final String16FW.Builder configNameRW = new String16FW.Builder(ByteOrder.BIG_ENDIAN); private final FindCoordinatorRequestFW.Builder findCoordinatorRequestRW = new FindCoordinatorRequestFW.Builder(); private final JoinGroupRequestFW.Builder joinGroupRequestRW = new JoinGroupRequestFW.Builder(); private final ProtocolMetadataFW.Builder protocolMetadataRW = new ProtocolMetadataFW.Builder(); @@ -141,9 +164,11 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final HeartbeatRequestFW.Builder heartbeatRequestRW = new HeartbeatRequestFW.Builder(); private final LeaveGroupRequestFW.Builder leaveGroupRequestRW = new LeaveGroupRequestFW.Builder(); private final LeaveMemberFW.Builder leaveMemberRW = new LeaveMemberFW.Builder(); - private final ResourceRequestFW.Builder resourceRequestRW = new ResourceRequestFW.Builder(); + private final ResourceResponseFW resourceResponseRO = new ResourceResponseFW(); + private final ConfigResponseFW configResponseRO = new ConfigResponseFW(); private final ResponseHeaderFW responseHeaderRO = new ResponseHeaderFW(); + private final DescribeConfigsResponseFW describeConfigsResponseRO = new DescribeConfigsResponseFW(); private final FindCoordinatorResponseFW findCoordinatorResponseRO = new FindCoordinatorResponseFW(); private final JoinGroupResponseFW joinGroupResponseRO = new JoinGroupResponseFW(); private final MemberMetadataFW memberMetadataRO = new MemberMetadataFW(); @@ -151,8 +176,18 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final HeartbeatResponseFW heartbeatResponseRO = new HeartbeatResponseFW(); private final LeaveGroupResponseFW leaveGroupResponseRO = new LeaveGroupResponseFW(); private final 
LeaveMemberFW leaveMemberRO = new LeaveMemberFW(); - private final ResourceResponseFW resourceResponseRO = new ResourceResponseFW(); - + private final Array32FW memberAssignmentRO = + new Array32FW<>(new MemberAssignmentFW()); + + private final KafkaDescribeClientDecoder decodeSaslHandshakeResponse = this::decodeSaslHandshakeResponse; + private final KafkaDescribeClientDecoder decodeSaslHandshake = this::decodeSaslHandshake; + private final KafkaDescribeClientDecoder decodeSaslHandshakeMechanisms = this::decodeSaslHandshakeMechanisms; + private final KafkaDescribeClientDecoder decodeSaslHandshakeMechanism = this::decodeSaslHandshakeMechanism; + private final KafkaDescribeClientDecoder decodeSaslAuthenticateResponse = this::decodeSaslAuthenticateResponse; + private final KafkaDescribeClientDecoder decodeSaslAuthenticate = this::decodeSaslAuthenticate; + private final KafkaDescribeClientDecoder decodeDescribeResponse = this::decodeDescribeResponse; + private final KafkaDescribeClientDecoder decodeIgnoreAll = this::decodeIgnoreAll; + private final KafkaDescribeClientDecoder decodeReject = this::decodeReject; private final KafkaGroupClusterClientDecoder decodeClusterSaslHandshakeResponse = this::decodeSaslHandshakeResponse; private final KafkaGroupClusterClientDecoder decodeClusterSaslHandshake = this::decodeSaslHandshake; private final KafkaGroupClusterClientDecoder decodeClusterSaslHandshakeMechanisms = this::decodeSaslHandshakeMechanisms; @@ -185,6 +220,7 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorIgnoreAll = this::decodeIgnoreAll; private final KafkaGroupCoordinatorClientDecoder decodeCoordinatorReject = this::decodeCoordinatorReject; + private final Map configs = new LinkedHashMap<>(); private final int kafkaTypeId; private final int proxyTypeId; private final MutableDirectBuffer writeBuffer; @@ -436,7 +472,7 @@ private void doData( 
receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); } - private void doDataNull( + private void doDataEmpty( MessageConsumer receiver, long originId, long routedId, @@ -461,12 +497,44 @@ private void doDataNull( .authorization(authorization) .budgetId(budgetId) .reserved(reserved) + .payload(EMPTY_OCTETS) .extension(extension) .build(); receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); } + private void doFlush( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(flush.typeId(), flush.buffer(), flush.offset(), flush.sizeof()); + } + private void doEnd( MessageConsumer receiver, long originId, @@ -592,6 +660,21 @@ int decode( int limit); } + @FunctionalInterface + private interface KafkaDescribeClientDecoder + { + int decode( + DescribeClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int progress, + int limit); + } + @FunctionalInterface private interface KafkaGroupCoordinatorClientDecoder { @@ -607,6 +690,117 @@ int decode( int limit); } + private int decodeDescribeResponse( + DescribeClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final ResponseHeaderFW responseHeader = 
responseHeaderRO.tryWrap(buffer, progress, limit); + if (responseHeader == null) + { + client.decoder = decodeIgnoreAll; + break decode; + } + + final int responseSize = responseHeader.length(); + + if (length >= responseHeader.sizeof() + responseSize) + { + progress = responseHeader.limit(); + + final DescribeConfigsResponseFW describeConfigsResponse = + describeConfigsResponseRO.tryWrap(buffer, progress, limit); + + if (describeConfigsResponse == null) + { + client.decoder = decodeIgnoreAll; + break decode; + } + + progress = describeConfigsResponse.limit(); + + final int resourceCount = describeConfigsResponse.resourceCount(); + for (int resourceIndex = 0; resourceIndex < resourceCount; resourceIndex++) + { + final ResourceResponseFW resource = resourceResponseRO.tryWrap(buffer, progress, limit); + if (resource == null) + { + client.decoder = decodeIgnoreAll; + break decode; + } + + progress = resource.limit(); + + final String resourceName = resource.name().asString(); + final int resourceError = resource.errorCode(); + + client.onDecodeResource(traceId, client.authorization, resourceError, resourceName); + // TODO: use different decoder for configs + if (resourceError != ERROR_NONE || !client.delegate.nodeId.equals(resourceName)) + { + client.decoder = decodeIgnoreAll; + break decode; + } + + final int configCount = resource.configCount(); + configs.clear(); + for (int configIndex = 0; configIndex < configCount; configIndex++) + { + final ConfigResponseFW config = configResponseRO.tryWrap(buffer, progress, limit); + if (config == null) + { + client.decoder = decodeIgnoreAll; + break decode; + } + + progress = config.limit(); + + final String name = config.name().asString(); + final String value = config.value().asString(); + + configs.put(name, value); + } + + client.onDecodeDescribeResponse(traceId, configs); + } + } + } + + if (client.decoder == decodeIgnoreAll) + { + client.cleanupNetwork(traceId); + } + + return progress; + } + + private int decodeReject( 
+ DescribeClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + client.doNetworkResetIfNecessary(traceId); + client.decoder = decodeIgnoreAll; + return limit; + } + private int decodeFindCoordinatorResponse( ClusterClient client, long traceId, @@ -650,7 +844,7 @@ else if (findCoordinatorResponse.errorCode() == ERROR_COORDINATOR_NOT_AVAILABLE) } else if (findCoordinatorResponse.errorCode() == ERROR_NONE) { - client.onFindCoordinator(traceId, authorization, + client.onFindCoordinator(traceId, authorization, findCoordinatorResponse.nodeId(), findCoordinatorResponse.host(), findCoordinatorResponse.port()); } else @@ -787,7 +981,8 @@ else if (errorCode == ERROR_NONE) final MemberMetadataFW memberMetadata = memberMetadataRO.tryWrap(buffer, progress, limit); if (memberMetadata != null) { - client.members.add(memberMetadata.memberId().asString()); + client.members.add(new MemberProtocol( + memberMetadata.memberId().asString(), memberMetadata.metadata())); progress = memberMetadata.limit(); } else @@ -797,7 +992,7 @@ else if (errorCode == ERROR_NONE) } client.onJoinGroupResponse(traceId, authorization, joinGroupResponse.leader().asString(), - joinGroupResponse.memberId().asString(), errorCode); + joinGroupResponse.memberId().asString()); } else { @@ -1016,11 +1211,12 @@ private final class KafkaGroupStream { private final MessageConsumer application; private final ClusterClient clusterClient; + private final DescribeClient describeClient; private final CoordinatorClient coordinatorClient; private final GroupMembership groupMembership; private final String groupId; private final String protocol; - private final int timeout; + private int timeout; private final long originId; private final long routedId; private final long initialId; @@ -1028,6 +1224,10 @@ private final class KafkaGroupStream private final long affinity; private final long resolvedId; private final 
KafkaSaslConfig sasl; + public String host; + public int port; + private String nodeId; + private MutableDirectBuffer metadataBuffer; private int state; @@ -1041,6 +1241,7 @@ private final class KafkaGroupStream private int replyPad; private long replyBudgetId; + private int topicMetadataLimit; KafkaGroupStream( MessageConsumer application, @@ -1068,7 +1269,9 @@ private final class KafkaGroupStream this.groupMembership = groupMembership; this.sasl = sasl; this.clusterClient = new ClusterClient(routedId, resolvedId, sasl, this); + this.describeClient = new DescribeClient(routedId, resolvedId, sasl, this); this.coordinatorClient = new CoordinatorClient(routedId, resolvedId, sasl, this); + this.metadataBuffer = new UnsafeBuffer(new byte[2048]); } private void onApplication( @@ -1117,6 +1320,22 @@ private void onApplicationBegin( { final long traceId = begin.traceId(); final long authorization = begin.authorization(); + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()); + final KafkaBeginExFW kafkaBeginEx = beginEx != null && beginEx.typeId() == kafkaTypeId ? 
+ kafkaBeginExRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()) : null; + + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_GROUP; + final KafkaGroupBeginExFW kafkaGroupBeginEx = kafkaBeginEx.group(); + + OctetsFW metadata = kafkaGroupBeginEx.metadata(); + final int metadataSize = kafkaGroupBeginEx.metadataLen(); + + if (metadataSize > 0) + { + metadataBuffer.putBytes(0, metadata.buffer(), metadata.offset(), kafkaGroupBeginEx.metadataLen()); + topicMetadataLimit += metadataSize; + } state = KafkaState.openingInitial(state); @@ -1221,7 +1440,10 @@ private void doApplicationBegin( final KafkaBeginExFW kafkaBeginEx = kafkaBeginExRW.wrap(writeBuffer, BeginFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) .typeId(kafkaTypeId) - .group(g -> g.groupId(groupId).protocol(protocol).timeout(timeout)) + .group(g -> g + .groupId(groupId) + .protocol(protocol) + .timeout(timeout)) .build(); doBegin(application, originId, routedId, replyId, replySeq, replyAck, replyMax, @@ -1231,8 +1453,7 @@ private void doApplicationBegin( private void doApplicationData( long traceId, long authorization, - OctetsFW payload, - Consumer extension) + OctetsFW payload) { final int reserved = replyPad; @@ -1240,12 +1461,12 @@ private void doApplicationData( { doData(application, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization, replyBudgetId, reserved, - payload.value(), payload.offset(), payload.sizeof(), extension); + payload.value(), payload.offset(), payload.sizeof(), EMPTY_EXTENSION); } else { - doDataNull(application, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, replyBudgetId, reserved, extension); + doDataEmpty(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, reserved, EMPTY_EXTENSION); } replySeq += reserved; @@ -1253,6 +1474,20 @@ private void doApplicationData( assert replyAck <= replySeq; } + private void doApplicationFlush( + long 
traceId, + long authorization, + Flyweight extension) + { + if (!KafkaState.replyClosed(state)) + { + final int reserved = replyPad; + + doFlush(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, reserved, extension); + } + } + private void doApplicationEnd( long traceId) { @@ -1338,7 +1573,8 @@ private void cleanupApplication( long traceId, int error) { - final KafkaResetExFW kafkaResetEx = kafkaResetExRW.wrap(extBuffer, 0, extBuffer.capacity()) + final KafkaResetExFW kafkaResetEx = kafkaResetExRW.wrap(writeBuffer, + ResetFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) .typeId(kafkaTypeId) .error(error) .build(); @@ -1980,12 +2216,17 @@ private void onCoordinatorNotAvailable( private void onFindCoordinator( long traceId, long authorization, + int nodeId, String16FW host, int port) { nextResponseId++; - delegate.coordinatorClient.doNetworkBeginIfNecessary(traceId, authorization, 0, host, port); + delegate.nodeId = String.valueOf(nodeId); + delegate.host = host.asString(); + delegate.port = port; + + delegate.describeClient.doNetworkBegin(traceId, authorization, 0); cleanupNetwork(traceId, authorization); } @@ -2033,18 +2274,15 @@ private void cleanupEncodeSlotIfNecessary() } } - private final class CoordinatorClient extends KafkaSaslClient + private final class DescribeClient extends KafkaSaslClient { private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest; private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest; - private final LongLongConsumer encodeJoinGroupRequest = this::doEncodeJoinGroupRequest; - private final LongLongConsumer encodeSyncGroupRequest = this::doEncodeSyncGroupRequest; - private final LongLongConsumer encodeHeartbeatRequest = this::doEncodeHeartbeatRequest; - private final LongLongConsumer encodeLeaveGroupRequest = this::doEncodeLeaveGroupRequest; - private final List members; - private final 
KafkaGroupStream delegate; + private final LongLongConsumer encodeDescribeRequest = this::doEncodeDescribeRequest; private MessageConsumer network; + private final Map configs; + private final KafkaGroupStream delegate; private int state; private long authorization; @@ -2068,27 +2306,47 @@ private final class CoordinatorClient extends KafkaSaslClient private int decodeSlotReserved; private int nextResponseId; - private long heartbeatRequestId = NO_CANCEL_ID; - private String leader; - - private int generationId; - private KafkaGroupCoordinatorClientDecoder decoder; + private KafkaDescribeClientDecoder decoder; private LongLongConsumer encoder; - private OctetsFW assignment = EMPTY_OCTETS; - CoordinatorClient( + DescribeClient( long originId, long routedId, KafkaSaslConfig sasl, KafkaGroupStream delegate) { super(sasl, originId, routedId); - - this.encoder = sasl != null ? encodeSaslHandshakeRequest : encodeJoinGroupRequest; + this.configs = new LinkedHashMap<>(); this.delegate = delegate; - this.decoder = decodeCoordinatorReject; - this.members = new ArrayList<>(); + + this.encoder = sasl != null ? 
encodeSaslHandshakeRequest : encodeDescribeRequest; + this.decoder = decodeReject; + + this.configs.put(GROUP_MIN_SESSION_TIMEOUT, null); + this.configs.put(GROUP_MAX_SESSION_TIMEOUT, null); + } + + public void onDecodeResource( + long traceId, + long authorization, + int errorCode, + String resource) + { + switch (errorCode) + { + case ERROR_NONE: + assert resource.equals(delegate.nodeId); + break; + default: + final KafkaResetExFW resetEx = kafkaResetExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .error(errorCode) + .build(); + delegate.cleanupApplication(traceId, resetEx); + doNetworkEnd(traceId, authorization); + break; + } } private void onNetwork( @@ -2161,7 +2419,7 @@ private void onNetworkData( if (replySeq > replyAck + replyMax) { - onError(traceId); + cleanupNetwork(traceId); } else { @@ -2172,7 +2430,7 @@ private void onNetworkData( if (decodeSlot == NO_SLOT) { - onError(traceId); + cleanupNetwork(traceId); } else { @@ -2204,9 +2462,13 @@ private void onNetworkEnd( cleanupDecodeSlotIfNecessary(); - if (!delegate.isApplicationReplyOpen()) + if (!KafkaState.replyOpened(delegate.state)) { - onError(traceId); + cleanupNetwork(traceId); + } + else if (decodeSlot == NO_SLOT) + { + delegate.doApplicationEnd(traceId); } } @@ -2217,7 +2479,7 @@ private void onNetworkAbort( state = KafkaState.closedReply(state); - onError(traceId); + cleanupNetwork(traceId); } private void onNetworkReset( @@ -2227,7 +2489,7 @@ private void onNetworkReset( state = KafkaState.closedInitial(state); - onError(traceId); + cleanupNetwork(traceId); } private void onNetworkWindow( @@ -2279,12 +2541,10 @@ private void onNetworkSignal( } } - private void doNetworkBeginIfNecessary( + private void doNetworkBegin( long traceId, long authorization, - long affinity, - String16FW host, - int port) + long affinity) { if (KafkaState.closed(state)) { @@ -2293,36 +2553,13 @@ private void doNetworkBeginIfNecessary( state = 0; } - if (!KafkaState.initialOpening(state)) - { - 
doNetworkBegin(traceId, authorization, affinity, host, port); - } - } - - private void doNetworkBegin( - long traceId, - long authorization, - long affinity, - String16FW host, - int port) - { this.initialId = supplyInitialId.applyAsLong(routedId); this.replyId = supplyReplyId.applyAsLong(initialId); state = KafkaState.openingInitial(state); - Consumer extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) - .typeId(proxyTypeId) - .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) - .source("0.0.0.0") - .destination(host) - .sourcePort(0) - .destinationPort(port))) - .build() - .sizeof()); - network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, affinity, extension); + traceId, authorization, affinity, EMPTY_EXTENSION); } @Override @@ -2352,23 +2589,18 @@ private void doNetworkEnd( long traceId, long authorization) { - if (!KafkaState.initialClosed(state)) - { - state = KafkaState.closedInitial(state); - - doEnd(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, EMPTY_EXTENSION); - } + state = KafkaState.closedInitial(state); cleanupEncodeSlotIfNecessary(); + doEnd(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); } - private void doNetworkAbort( + private void doNetworkAbortIfNecessary( long traceId) { - if (KafkaState.initialOpened(state) && - !KafkaState.initialClosed(state)) + if (!KafkaState.initialClosed(state)) { doAbort(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization, EMPTY_EXTENSION); @@ -2378,7 +2610,7 @@ private void doNetworkAbort( cleanupEncodeSlotIfNecessary(); } - private void doNetworkReset( + private void doNetworkResetIfNecessary( long traceId) { if (!KafkaState.replyClosed(state)) @@ -2424,10 +2656,15 @@ private void doEncodeRequestIfNecessary( } } - private void doEncodeJoinGroupRequest( 
+ private void doEncodeDescribeRequest( long traceId, long budgetId) { + if (KafkaConfiguration.DEBUG) + { + System.out.format("[client] %s DESCRIBE\n", delegate.nodeId); + } + final MutableDirectBuffer encodeBuffer = writeBuffer; final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; final int encodeLimit = encodeBuffer.capacity(); @@ -2436,36 +2673,37 @@ private void doEncodeJoinGroupRequest( final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit) .length(0) - .apiKey(JOIN_GROUP_API_KEY) - .apiVersion(JOIN_GROUP_VERSION) + .apiKey(DESCRIBE_CONFIGS_API_KEY) + .apiVersion(DESCRIBE_CONFIGS_API_VERSION) .correlationId(0) - .clientId(clientId) + .clientId((String) null) .build(); encodeProgress = requestHeader.limit(); - final String memberId = delegate.groupMembership.memberIds.getOrDefault(delegate.groupId, UNKNOWN_MEMBER_ID); - - final JoinGroupRequestFW joinGroupRequest = - joinGroupRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) - .groupId(delegate.groupId) - .sessionTimeoutMillis(delegate.timeout) - .rebalanceTimeoutMillis((int) rebalanceTimeout.toMillis()) - .memberId(memberId) - .groupInstanceId(delegate.groupMembership.instanceId) - .protocolType("consumer") - .protocolCount(1) + final DescribeConfigsRequestFW describeConfigsRequest = + describeConfigsRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .resourceCount(1) .build(); - encodeProgress = joinGroupRequest.limit(); + encodeProgress = describeConfigsRequest.limit(); - final ProtocolMetadataFW protocolMetadata = - protocolMetadataRW.wrap(encodeBuffer, encodeProgress, encodeLimit) - .name(delegate.protocol) - .metadata(EMPTY_OCTETS) + final ResourceRequestFW resourceRequest = resourceRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .type(RESOURCE_TYPE_BROKER) + .name(delegate.nodeId) + .configNamesCount(configs.size()) + .build(); + + encodeProgress = resourceRequest.limit(); + + for (String config : configs.keySet()) + { + final 
String16FW configName = configNameRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .set(config, UTF_8) .build(); - encodeProgress = protocolMetadata.limit(); + encodeProgress = configName.limit(); + } final int requestId = nextRequestId++; final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; @@ -2480,14 +2718,637 @@ private void doEncodeJoinGroupRequest( doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); - decoder = decodeJoinGroupResponse; - - delegate.doApplicationBeginIfNecessary(traceId, authorization); + decoder = decodeDescribeResponse; } - private void doEncodeSyncGroupRequest( + private void encodeNetwork( long traceId, - long budgetId) + long authorization, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + final int maxLength = limit - offset; + final int initialWin = initialMax - (int)(initialSeq - initialAck); + final int length = Math.max(Math.min(initialWin - initialPad, maxLength), 0); + + if (length > 0) + { + final int reserved = length + initialPad; + + doData(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_EXTENSION); + + initialSeq += reserved; + + assert initialAck <= initialSeq; + } + + final int remaining = maxLength - length; + if (remaining > 0) + { + if (encodeSlot == NO_SLOT) + { + encodeSlot = encodePool.acquire(initialId); + } + + if (encodeSlot == NO_SLOT) + { + cleanupNetwork(traceId); + } + else + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(0, buffer, offset + length, remaining); + encodeSlotOffset = remaining; + } + } + else + { + cleanupEncodeSlotIfNecessary(); + } + } + + private void decodeNetwork( + long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int limit) + { + KafkaDescribeClientDecoder previous = null; + int 
progress = offset; + while (progress <= limit && previous != decoder) + { + previous = decoder; + progress = decoder.decode(this, traceId, authorization, budgetId, reserved, buffer, offset, progress, limit); + } + + if (progress < limit) + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = decodePool.acquire(initialId); + } + + if (decodeSlot == NO_SLOT) + { + cleanupNetwork(traceId); + } + else + { + final MutableDirectBuffer decodeBuffer = decodePool.buffer(decodeSlot); + decodeBuffer.putBytes(0, buffer, progress, limit - progress); + decodeSlotOffset = limit - progress; + decodeSlotReserved = (limit - progress) * reserved / (limit - offset); + } + + doNetworkWindow(traceId, budgetId, decodeSlotOffset, 0, replyMax); + } + else + { + cleanupDecodeSlotIfNecessary(); + + if (reserved > 0) + { + doNetworkWindow(traceId, budgetId, 0, 0, replyMax); + } + } + } + + @Override + protected void doDecodeSaslHandshakeResponse( + long traceId) + { + decoder = decodeSaslHandshakeResponse; + } + + @Override + protected void doDecodeSaslHandshake( + long traceId) + { + decoder = decodeSaslHandshake; + } + + @Override + protected void doDecodeSaslHandshakeMechanisms( + long traceId) + { + decoder = decodeSaslHandshakeMechanisms; + } + + @Override + protected void doDecodeSaslHandshakeMechansim( + long traceId) + { + decoder = decodeSaslHandshakeMechanism; + } + + @Override + protected void doDecodeSaslAuthenticateResponse( + long traceId) + { + decoder = decodeSaslAuthenticateResponse; + } + + @Override + protected void doDecodeSaslAuthenticate( + long traceId) + { + decoder = decodeSaslAuthenticate; + } + + @Override + protected void onDecodeSaslHandshakeResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + encoder = encodeSaslAuthenticateRequest; + decoder = decodeSaslAuthenticateResponse; + break; + default: + delegate.cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + 
@Override + protected void onDecodeSaslAuthenticateResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + encoder = encodeDescribeRequest; + decoder = decodeDescribeResponse; + break; + default: + delegate.cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + @Override + protected void onDecodeSaslResponse( + long traceId) + { + nextResponseId++; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onDecodeDescribeResponse( + long traceId, + Map newConfigs) + { + nextResponseId++; + + int timeoutMin = Integer.valueOf(newConfigs.get(GROUP_MIN_SESSION_TIMEOUT)).intValue(); + int timeoutMax = Integer.valueOf(newConfigs.get(GROUP_MAX_SESSION_TIMEOUT)).intValue(); + if (delegate.timeout < timeoutMin) + { + delegate.timeout = timeoutMin; + } + else if (delegate.timeout > timeoutMax) + { + delegate.timeout = timeoutMax; + } + + delegate.coordinatorClient.doNetworkBeginIfNecessary(traceId, authorization, 0); + + cleanupNetwork(traceId); + } + + private void cleanupNetwork( + long traceId) + { + doNetworkResetIfNecessary(traceId); + doNetworkAbortIfNecessary(traceId); + } + + private void cleanupDecodeSlotIfNecessary() + { + if (decodeSlot != NO_SLOT) + { + decodePool.release(decodeSlot); + decodeSlot = NO_SLOT; + decodeSlotOffset = 0; + decodeSlotReserved = 0; + } + } + + private void cleanupEncodeSlotIfNecessary() + { + if (encodeSlot != NO_SLOT) + { + encodePool.release(encodeSlot); + encodeSlot = NO_SLOT; + encodeSlotOffset = 0; + encodeSlotTraceId = 0; + } + } + } + + private final class CoordinatorClient extends KafkaSaslClient + { + private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest; + private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest; + private final LongLongConsumer encodeJoinGroupRequest = this::doEncodeJoinGroupRequest; 
+ private final LongLongConsumer encodeSyncGroupRequest = this::doEncodeSyncGroupRequest; + private final LongLongConsumer encodeHeartbeatRequest = this::doEncodeHeartbeatRequest; + private final LongLongConsumer encodeLeaveGroupRequest = this::doEncodeLeaveGroupRequest; + private final List members; + private final KafkaGroupStream delegate; + + private MessageConsumer network; + + private int state; + private long authorization; + + private long initialSeq; + private long initialAck; + private int initialMax; + private int initialPad; + private long initialBudgetId; + + private long replySeq; + private long replyAck; + private int replyMax; + + private int encodeSlot = NO_SLOT; + private int encodeSlotOffset; + private long encodeSlotTraceId; + + private int decodeSlot = NO_SLOT; + private int decodeSlotOffset; + private int decodeSlotReserved; + + private int nextResponseId; + private long heartbeatRequestId = NO_CANCEL_ID; + + private String leader; + + private int generationId; + private KafkaGroupCoordinatorClientDecoder decoder; + private LongLongConsumer encoder; + private OctetsFW assignment = EMPTY_OCTETS; + + CoordinatorClient( + long originId, + long routedId, + KafkaSaslConfig sasl, + KafkaGroupStream delegate) + { + super(sasl, originId, routedId); + + this.encoder = sasl != null ? 
encodeSaslHandshakeRequest : encodeJoinGroupRequest; + this.delegate = delegate; + this.decoder = decodeCoordinatorReject; + this.members = new ArrayList<>(); + } + + private void onNetwork( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onNetworkBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onNetworkData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onNetworkEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onNetworkAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onNetworkReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onNetworkWindow(window); + break; + case SignalFW.TYPE_ID: + final SignalFW signal = signalRO.wrap(buffer, index, index + length); + onNetworkSignal(signal); + break; + default: + break; + } + } + + private void onNetworkBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + authorization = begin.authorization(); + state = KafkaState.openingReply(state); + + doNetworkWindow(traceId, 0L, 0, 0, decodePool.slotCapacity()); + } + + private void onNetworkData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long budgetId = data.budgetId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + data.reserved(); + authorization = data.authorization(); + + assert replyAck <= replySeq; + + if (replySeq > replyAck + replyMax) + { + onError(traceId); + } + else + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = 
decodePool.acquire(initialId); + } + + if (decodeSlot == NO_SLOT) + { + onError(traceId); + } + else + { + final OctetsFW payload = data.payload(); + int reserved = data.reserved(); + int offset = payload.offset(); + int limit = payload.limit(); + + final MutableDirectBuffer buffer = decodePool.buffer(decodeSlot); + buffer.putBytes(decodeSlotOffset, payload.buffer(), offset, limit - offset); + decodeSlotOffset += limit - offset; + decodeSlotReserved += reserved; + + offset = 0; + limit = decodeSlotOffset; + reserved = decodeSlotReserved; + + decodeNetwork(traceId, authorization, budgetId, reserved, buffer, offset, limit); + } + } + } + + private void onNetworkEnd( + EndFW end) + { + final long traceId = end.traceId(); + + state = KafkaState.closedReply(state); + + cleanupDecodeSlotIfNecessary(); + + if (!delegate.isApplicationReplyOpen()) + { + onError(traceId); + } + } + + private void onNetworkAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedReply(state); + + onError(traceId); + } + + private void onNetworkReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + onError(traceId); + } + + private void onNetworkWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= initialSeq; + assert acknowledge >= initialAck; + assert maximum + acknowledge >= initialMax + initialAck; + + this.initialAck = acknowledge; + this.initialMax = maximum; + this.initialPad = padding; + this.initialBudgetId = budgetId; + + assert initialAck <= initialSeq; + + this.authorization = window.authorization(); + + state = KafkaState.openedInitial(state); + + if (encodeSlot != NO_SLOT) + { + final 
MutableDirectBuffer buffer = encodePool.buffer(encodeSlot); + final int limit = encodeSlotOffset; + + encodeNetwork(encodeSlotTraceId, authorization, budgetId, buffer, 0, limit); + } + + doEncodeRequestIfNecessary(traceId, budgetId); + } + + private void onNetworkSignal( + SignalFW signal) + { + final long traceId = signal.traceId(); + final int signalId = signal.signalId(); + + if (signalId == SIGNAL_NEXT_REQUEST) + { + doEncodeRequestIfNecessary(traceId, initialBudgetId); + } + } + + private void doNetworkBeginIfNecessary( + long traceId, + long authorization, + long affinity) + { + if (KafkaState.closed(state)) + { + replyAck = 0; + replySeq = 0; + state = 0; + } + + if (!KafkaState.initialOpening(state)) + { + doNetworkBegin(traceId, authorization, affinity); + } + } + + private void doNetworkBegin( + long traceId, + long authorization, + long affinity) + { + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + + state = KafkaState.openingInitial(state); + + Consumer extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) + .typeId(proxyTypeId) + .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) + .source("0.0.0.0") + .destination(delegate.host) + .sourcePort(0) + .destinationPort(delegate.port))) + .build() + .sizeof()); + + network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, extension); + } + + @Override + protected void doNetworkData( + long traceId, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + if (encodeSlot != NO_SLOT) + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(encodeSlotOffset, buffer, offset, limit - offset); + encodeSlotOffset += limit - offset; + encodeSlotTraceId = traceId; + + buffer = encodeBuffer; + offset = 0; + limit = encodeSlotOffset; + } + + encodeNetwork(traceId, authorization, budgetId, buffer, 
offset, limit); + } + + private void doNetworkEnd( + long traceId, + long authorization) + { + if (!KafkaState.initialClosed(state)) + { + state = KafkaState.closedInitial(state); + + doEnd(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + } + + cleanupEncodeSlotIfNecessary(); + + } + + private void doNetworkAbort( + long traceId) + { + if (KafkaState.initialOpened(state) && + !KafkaState.initialClosed(state)) + { + doAbort(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + state = KafkaState.closedInitial(state); + } + + cleanupEncodeSlotIfNecessary(); + } + + private void doNetworkReset( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_OCTETS); + state = KafkaState.closedReply(state); + } + + cleanupDecodeSlotIfNecessary(); + } + + private void doNetworkWindow( + long traceId, + long budgetId, + int minReplyNoAck, + int minReplyPad, + int minReplyMax) + { + final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck); + + if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state)) + { + replyAck = newReplyAck; + assert replyAck <= replySeq; + + replyMax = minReplyMax; + + state = KafkaState.openedReply(state); + + doWindow(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, minReplyPad); + } + } + + private void doEncodeRequestIfNecessary( + long traceId, + long budgetId) + { + if (nextRequestId == nextResponseId) + { + encoder.accept(traceId, budgetId); + } + } + + private void doEncodeJoinGroupRequest( + long traceId, + long budgetId) { final MutableDirectBuffer encodeBuffer = writeBuffer; final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; @@ -2496,6 +3357,67 @@ private void doEncodeSyncGroupRequest( int 
encodeProgress = encodeOffset; final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .length(0) + .apiKey(JOIN_GROUP_API_KEY) + .apiVersion(JOIN_GROUP_VERSION) + .correlationId(0) + .clientId(clientId) + .build(); + + encodeProgress = requestHeader.limit(); + + final String memberId = delegate.groupMembership.memberIds.getOrDefault(delegate.groupId, UNKNOWN_MEMBER_ID); + + final JoinGroupRequestFW joinGroupRequest = + joinGroupRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .groupId(delegate.groupId) + .sessionTimeoutMillis(delegate.timeout) + .rebalanceTimeoutMillis((int) rebalanceTimeout.toMillis()) + .memberId(memberId) + .groupInstanceId(delegate.groupMembership.instanceId) + .protocolType("consumer") + .protocolCount(1) + .build(); + + encodeProgress = joinGroupRequest.limit(); + + final ProtocolMetadataFW protocolMetadata = + protocolMetadataRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .name(delegate.protocol) + .metadata(delegate.metadataBuffer, 0, delegate.topicMetadataLimit) + .build(); + + encodeProgress = protocolMetadata.limit(); + + final int requestId = nextRequestId++; + final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; + + requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit()) + .length(requestSize) + .apiKey(requestHeader.apiKey()) + .apiVersion(requestHeader.apiVersion()) + .correlationId(requestId) + .clientId(requestHeader.clientId().asString()) + .build(); + + doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); + + decoder = decodeJoinGroupResponse; + + delegate.doApplicationBeginIfNecessary(traceId, authorization); + } + + private void doEncodeSyncGroupRequest( + long traceId, + long budgetId) + { + final MutableDirectBuffer encodeBuffer = writeBuffer; + final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; + final int encodeLimit = encodeBuffer.capacity(); + + MutableInteger 
encodeProgress = new MutableInteger(encodeOffset); + + final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress.get(), encodeLimit) .length(0) .apiKey(SYNC_GROUP_API_KEY) .apiVersion(SYNC_GROUP_VERSION) @@ -2503,39 +3425,55 @@ private void doEncodeSyncGroupRequest( .clientId(clientId) .build(); - encodeProgress = requestHeader.limit(); + encodeProgress.set(requestHeader.limit()); final String memberId = delegate.groupMembership.memberIds.get(delegate.groupId); - final boolean isLeader = leader.equals(memberId); - final SyncGroupRequestFW syncGroupRequest = - syncGroupRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + syncGroupRequestRW.wrap(encodeBuffer, encodeProgress.get(), encodeLimit) .groupId(delegate.groupId) .generatedId(generationId) .memberId(memberId) .groupInstanceId(delegate.groupMembership.instanceId) - .assignmentCount(isLeader ? members.size() : 0) + .assignmentCount(members.size()) .build(); - encodeProgress = syncGroupRequest.limit(); + encodeProgress.set(syncGroupRequest.limit()); - if (isLeader) + if (assignment.sizeof() > 0) { - for (int i = 0; i < members.size(); i++) + Array32FW assignments = memberAssignmentRO + .wrap(assignment.buffer(), assignment.offset(), assignment.limit()); + + assignments.forEach(a -> { + Array32FW topicPartitions = a.assignments(); final AssignmentFW groupAssignment = - assignmentRW.wrap(encodeBuffer, encodeProgress, encodeLimit) - .memberId(members.get(i)) - .value(assignment) + assignmentRW.wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .memberId(a.memberId()) + .value(topicPartitions.buffer(), topicPartitions.offset(), topicPartitions.length()) .build(); - encodeProgress = groupAssignment.limit(); - } + encodeProgress.set(groupAssignment.limit()); + }); + } + else + { + members.forEach(m -> + { + final AssignmentFW groupAssignment = + assignmentRW.wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .memberId(m.memberId) + .value(m.metadata) + .build(); + + 
encodeProgress.set(groupAssignment.limit()); + }); } + final int requestId = nextRequestId++; - final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; + final int requestSize = encodeProgress.get() - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit()) .length(requestSize) @@ -2545,7 +3483,7 @@ private void doEncodeSyncGroupRequest( .clientId(requestHeader.clientId().asString()) .build(); - doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); + doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress.get()); decoder = decodeSyncGroupResponse; } @@ -2886,16 +3824,6 @@ private void onNotCoordinatorError( delegate.onNotCoordinatorError(traceId, authorization); } - private void onJoinGroupUnknownMemberError( - long traceId, - long authorization) - { - nextResponseId++; - - delegate.groupMembership.memberIds.put(delegate.groupId, UNKNOWN_MEMBER_ID); - signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); - } - private void onJoinGroupMemberIdError( long traceId, long authorization, @@ -2910,18 +3838,36 @@ private void onJoinGroupMemberIdError( private void onJoinGroupResponse( long traceId, long authorization, - String leader, - String memberId, - int error) + String leaderId, + String memberId) { nextResponseId++; - this.leader = leader; + this.leader = leaderId; delegate.groupMembership.memberIds.put(delegate.groupId, memberId); + final KafkaFlushExFW kafkaFlushEx = + kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) + .typeId(kafkaTypeId) + .group(g -> g.leaderId(leaderId) + .memberId(memberId) + .members(gm -> members.forEach(m -> + gm.item(i -> + { + KafkaGroupMemberFW.Builder member = i.id(m.memberId); + if (m.metadata.sizeof() > 0) + { + member.metadataLen(m.metadata.sizeof()) + .metadata(m.metadata) + .build(); + } + })))) + .build(); + + 
delegate.doApplicationFlush(traceId, authorization, kafkaFlushEx); + encoder = encodeSyncGroupRequest; - signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); } private void onSynGroupRebalance( @@ -2941,16 +3887,9 @@ private void onSyncGroupResponse( { nextResponseId++; - final String memberId = delegate.groupMembership.memberIds.get(delegate.groupId); - - delegate.doApplicationData(traceId, authorization, assignment, - ex -> ex.set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) - .typeId(kafkaTypeId) - .group(g -> g.leaderId(leader).memberId(memberId).members(members.size())) - .build() - .sizeof())); + delegate.doApplicationData(traceId, authorization, assignment); - if (heartbeatRequestId != NO_CANCEL_ID) + if (heartbeatRequestId == NO_CANCEL_ID) { encoder = encodeHeartbeatRequest; @@ -3052,4 +3991,19 @@ private final class GroupMembership this.memberIds = new Object2ObjectHashMap<>(); } } + + private final class MemberProtocol + { + private final String memberId; + private final OctetsFW metadata; + + MemberProtocol( + String memberId, + OctetsFW metadata) + { + + this.memberId = memberId; + this.metadata = metadata; + } + } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java new file mode 100644 index 0000000000..b072755903 --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java @@ -0,0 +1,1644 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT; +import static java.util.Objects.requireNonNull; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.LongFunction; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.collections.Int2ObjectHashMap; +import org.agrona.collections.LongLongConsumer; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.RequestHeaderFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.ResponseHeaderFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.OffsetFetchPartitionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.OffsetFetchRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.OffsetFetchResponseFW; +import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.OffsetFetchTopicRequestFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.OffsetFetchTopicResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.PartitionIndexFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaOffsetFetchBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.SignalFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; +import io.aklivity.zilla.runtime.engine.concurrent.Signaler; + + +public final class KafkaClientOffsetFetchFactory extends KafkaClientSaslHandshaker implements BindingHandler +{ + private static final int ERROR_NONE = 0; + + private static final int SIGNAL_NEXT_REQUEST = 1; + + private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer(); + private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0); + private static final 
Consumer EMPTY_EXTENSION = ex -> {}; + + private static final short OFFSET_FETCH_API_KEY = 9; + private static final short OFFSET_FETCH_API_VERSION = 0; + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final AbortFW abortRO = new AbortFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new WindowFW(); + private final SignalFW signalRO = new SignalFW(); + private final ExtensionFW extensionRO = new ExtensionFW(); + private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); + private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); + private final KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder(); + + private final RequestHeaderFW.Builder requestHeaderRW = new RequestHeaderFW.Builder(); + private final OffsetFetchRequestFW.Builder offsetFetchRequestRW = new OffsetFetchRequestFW.Builder(); + private final OffsetFetchTopicRequestFW.Builder offsetFetchTopicRequestRW = new OffsetFetchTopicRequestFW.Builder(); + private final PartitionIndexFW.Builder partitionIndexRW = new PartitionIndexFW.Builder(); + + private final ResponseHeaderFW responseHeaderRO = new ResponseHeaderFW(); + private final OffsetFetchResponseFW offsetFetchResponseRO = new OffsetFetchResponseFW(); + private final OffsetFetchTopicResponseFW offsetFetchTopicResponseRO = new OffsetFetchTopicResponseFW(); + private final OffsetFetchPartitionFW 
offsetFetchPartitionRO = new OffsetFetchPartitionFW(); + + private final KafkaOffsetFetchClientDecoder decodeSaslHandshakeResponse = this::decodeSaslHandshakeResponse; + private final KafkaOffsetFetchClientDecoder decodeSaslHandshake = this::decodeSaslHandshake; + private final KafkaOffsetFetchClientDecoder decodeSaslHandshakeMechanisms = this::decodeSaslHandshakeMechanisms; + private final KafkaOffsetFetchClientDecoder decodeSaslHandshakeMechanism = this::decodeSaslHandshakeMechanism; + private final KafkaOffsetFetchClientDecoder decodeSaslAuthenticateResponse = this::decodeSaslAuthenticateResponse; + private final KafkaOffsetFetchClientDecoder decodeSaslAuthenticate = this::decodeSaslAuthenticate; + private final KafkaOffsetFetchClientDecoder decodeOffsetFetchResponse = this::decodeOffsetFetchResponse; + private final KafkaOffsetFetchClientDecoder decodeOffsetFetchTopics = this::decodeOffsetFetchTopics; + private final KafkaOffsetFetchClientDecoder decodeOffsetFetchTopic = this::decodeOffsetFetchTopic; + private final KafkaOffsetFetchClientDecoder decodeOffsetFetchPartitions = this::decodeOffsetFetchPartitions; + private final KafkaOffsetFetchClientDecoder decodeOffsetFetchPartition = this::decodeOffsetFetchPartition; + + private final KafkaOffsetFetchClientDecoder decodeIgnoreAll = this::decodeIgnoreAll; + private final KafkaOffsetFetchClientDecoder decodeReject = this::decodeReject; + + private final int kafkaTypeId; + private final MutableDirectBuffer writeBuffer; + private final MutableDirectBuffer extBuffer; + private final BufferPool decodePool; + private final BufferPool encodePool; + private final Signaler signaler; + private final BindingHandler streamFactory; + private final LongFunction supplyBinding; + private final LongFunction supplyClientRoute; + + public KafkaClientOffsetFetchFactory( + KafkaConfiguration config, + EngineContext context, + LongFunction supplyBinding, + LongFunction supplyClientRoute) + { + super(config, context); + 
this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); + this.signaler = context.signaler(); + this.streamFactory = context.streamFactory(); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.decodePool = context.bufferPool(); + this.encodePool = context.bufferPool(); + this.supplyBinding = supplyBinding; + this.supplyClientRoute = supplyClientRoute; + } + + @Override + public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer application) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long affinity = begin.affinity(); + final long authorization = begin.authorization(); + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()); + final KafkaBeginExFW kafkaBeginEx = beginEx != null && beginEx.typeId() == kafkaTypeId ? + kafkaBeginExRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()) : null; + + assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_OFFSET_FETCH; + final KafkaOffsetFetchBeginExFW kafkaOffsetFetchBeginEx = kafkaBeginEx.offsetFetch(); + final String groupId = kafkaOffsetFetchBeginEx.groupId().asString(); + List topics = new ArrayList<>(); + kafkaOffsetFetchBeginEx.topics().forEach(t -> + { + List partitions = new ArrayList<>(); + t.partitions().forEach(p -> partitions.add(p.partitionId())); + topics.add(new KafkaOffsetFetchTopic(t.topic().asString(), partitions)); + }); + + + MessageConsumer newStream = null; + + final KafkaBindingConfig binding = supplyBinding.apply(routedId); + final KafkaRouteConfig resolved = binding != null ? 
+ binding.resolve(authorization, null, groupId) : null; + + if (resolved != null) + { + final long resolvedId = resolved.id; + final KafkaSaslConfig sasl = binding.sasl(); + + newStream = new KafkaOffsetFetchStream( + application, + originId, + routedId, + initialId, + affinity, + resolvedId, + groupId, + topics, + sasl)::onApplication; + } + + return newStream; + } + + private MessageConsumer newStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + final MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long 
traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer payload, + int offset, + int length, + Consumer extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload, offset, length) + .extension(extension) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doDataNull( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Flyweight extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long 
originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doWindow( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + { + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Flyweight extension) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + @FunctionalInterface + private interface KafkaOffsetFetchClientDecoder + { + int decode( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, 
+ long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int progress, + int limit); + } + + private int decodeOffsetFetchResponse( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final ResponseHeaderFW responseHeader = responseHeaderRO.tryWrap(buffer, progress, limit); + if (responseHeader == null) + { + break decode; + } + + progress = responseHeader.limit(); + + client.decodeableResponseBytes = responseHeader.length(); + + final OffsetFetchResponseFW offsetFetchResponse = offsetFetchResponseRO.tryWrap(buffer, progress, limit); + if (offsetFetchResponse == null) + { + break decode; + } + + progress = offsetFetchResponse.limit(); + + client.decodeableResponseBytes -= offsetFetchResponse.sizeof(); + assert client.decodeableResponseBytes >= 0; + + client.decodeableTopics = offsetFetchResponse.topicCount(); + client.decoder = decodeOffsetFetchTopics; + } + + return progress; + } + + private int decodeOffsetFetchTopics( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + if (client.decodeableTopics == 0) + { + assert client.decodeableResponseBytes == 0; + + client.decoder = decodeOffsetFetchResponse; + } + else + { + client.decoder = decodeOffsetFetchTopic; + } + + return progress; + } + + private int decodeOffsetFetchTopic( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final OffsetFetchTopicResponseFW topicOffsetFetch = 
offsetFetchTopicResponseRO.tryWrap(buffer, progress, limit); + if (topicOffsetFetch == null) + { + break decode; + } + + final String topic = topicOffsetFetch.name().asString(); + + client.onDecodeTopic(traceId, authorization, topic); + + progress = topicOffsetFetch.limit(); + + client.decodeableResponseBytes -= topicOffsetFetch.sizeof(); + assert client.decodeableResponseBytes >= 0; + + client.decodeablePartitions = topicOffsetFetch.partitionCount(); + client.decoder = decodeOffsetFetchPartitions; + } + + return progress; + } + + private int decodeOffsetFetchPartitions( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + if (client.decodeablePartitions == 0) + { + client.decodeableTopics--; + assert client.decodeableTopics >= 0; + + client.decoder = decodeOffsetFetchTopics; + client.onDecodeOffsetFetchResponse(traceId); + } + else + { + client.decoder = decodeOffsetFetchPartition; + } + + return progress; + } + + private int decodeOffsetFetchPartition( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + final int length = limit - progress; + + decode: + if (length != 0) + { + final OffsetFetchPartitionFW partition = offsetFetchPartitionRO.tryWrap(buffer, progress, limit); + if (partition == null) + { + break decode; + } + + final int partitionError = partition.errorCode(); + final int partitionId = partition.partitionIndex(); + final long offsetCommitted = partition.committedOffset(); + + client.onDecodePartition(traceId, partitionId, offsetCommitted, partitionError); + + progress = partition.limit(); + + client.decodeableResponseBytes -= partition.sizeof(); + assert client.decodeableResponseBytes >= 0; + + client.decodeablePartitions--; + assert client.decodeablePartitions 
>= 0; + + client.decoder = decodeOffsetFetchPartitions; + } + + return progress; + } + + private int decodeReject( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + client.doNetworkResetIfNecessary(traceId); + client.decoder = decodeIgnoreAll; + return limit; + } + + private int decodeIgnoreAll( + KafkaOffsetFetchStream.KafkaOffsetFetchClient client, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int offset, + int progress, + int limit) + { + return limit; + } + + private final class KafkaOffsetFetchStream + { + private final MessageConsumer application; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private final long affinity; + private final KafkaOffsetFetchClient client; + private final KafkaClientRoute clientRoute; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private long replyBudgetId; + + KafkaOffsetFetchStream( + MessageConsumer application, + long originId, + long routedId, + long initialId, + long affinity, + long resolvedId, + String groupId, + List topics, + KafkaSaslConfig sasl) + { + this.application = application; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + this.clientRoute = supplyClientRoute.apply(resolvedId); + this.client = new KafkaOffsetFetchClient(routedId, resolvedId, groupId, topics, sasl); + } + + private void onApplication( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = 
beginRO.wrap(buffer, index, index + length); + onApplicationBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onApplicationData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onApplicationEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onApplicationAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onApplicationWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onApplicationReset(reset); + break; + default: + break; + } + } + + private void onApplicationBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + + state = KafkaState.openingInitial(state); + + client.doNetworkBegin(traceId, authorization, affinity); + } + + private void onApplicationData( + DataFW data) + { + final long traceId = data.traceId(); + + client.cleanupNetwork(traceId); + } + + private void onApplicationEnd( + EndFW end) + { + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + state = KafkaState.closedInitial(state); + + client.doNetworkEnd(traceId, authorization); + } + + private void onApplicationAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedInitial(state); + + client.doNetworkAbortIfNecessary(traceId); + } + + private void onApplicationWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + 
this.replyAck = acknowledge; + this.replyMax = maximum; + this.replyPad = padding; + this.replyBudgetId = budgetId; + + assert replyAck <= replySeq; + } + + private void onApplicationReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + client.doNetworkResetIfNecessary(traceId); + } + + private boolean isApplicationReplyOpen() + { + return KafkaState.replyOpening(state); + } + + private void doApplicationBeginIfNecessary( + long traceId, + long authorization) + { + if (!KafkaState.replyOpening(state)) + { + doApplicationBegin(traceId, authorization); + } + } + + private void doApplicationBegin( + long traceId, + long authorization) + { + state = KafkaState.openingReply(state); + + doBegin(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, EMPTY_EXTENSION); + } + + private void doApplicationData( + long traceId, + long authorization, + KafkaDataExFW extension) + { + final int reserved = replyPad; + + doDataNull(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBudgetId, reserved, extension); + + replySeq += reserved; + + assert replyAck <= replySeq; + } + + private void doApplicationEnd( + long traceId) + { + state = KafkaState.closedReply(state); + //client.stream = nullIfClosed(state, client.stream); + doEnd(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, client.authorization, EMPTY_EXTENSION); + } + + private void doApplicationAbort( + long traceId) + { + state = KafkaState.closedReply(state); + //client.stream = nullIfClosed(state, client.stream); + doAbort(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, client.authorization, EMPTY_EXTENSION); + } + + private void doApplicationWindow( + long traceId, + long budgetId, + int minInitialNoAck, + int minInitialPad, + int minInitialMax) + { + final long newInitialAck = 
Math.max(initialSeq - minInitialNoAck, initialAck); + + if (newInitialAck > initialAck || minInitialMax > initialMax || !KafkaState.initialOpened(state)) + { + initialAck = newInitialAck; + assert initialAck <= initialSeq; + + initialMax = minInitialMax; + + state = KafkaState.openedInitial(state); + + doWindow(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, client.authorization, budgetId, minInitialPad); + } + } + + private void doApplicationReset( + long traceId, + Flyweight extension) + { + state = KafkaState.closedInitial(state); + //client.stream = nullIfClosed(state, client.stream); + + doReset(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, client.authorization, extension); + } + + private void doApplicationAbortIfNecessary( + long traceId) + { + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + { + doApplicationAbort(traceId); + } + } + + private void doApplicationResetIfNecessary( + long traceId, + Flyweight extension) + { + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + { + doApplicationReset(traceId, extension); + } + } + + private void cleanupApplication( + long traceId, + int error) + { + final KafkaResetExFW kafkaResetEx = kafkaResetExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .error(error) + .build(); + + cleanupApplication(traceId, kafkaResetEx); + } + + private void cleanupApplication( + long traceId, + Flyweight extension) + { + doApplicationResetIfNecessary(traceId, extension); + doApplicationAbortIfNecessary(traceId); + } + + private final class KafkaOffsetFetchClient extends KafkaSaslClient + { + private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest; + private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest; + private final LongLongConsumer encodeOffsetFetchRequest = this::doEncodeOffsetFetchRequest; 
+ + private final String groupId; + private final List topics; + private final Int2ObjectHashMap topicPartitions; + private String newTopic; + + private MessageConsumer network; + private int state; + private int decodeableResponseBytes; + private int decodeableTopics; + private int decodeablePartitions; + private long authorization; + + private long initialSeq; + private long initialAck; + private int initialMax; + private int initialPad; + private long initialBudgetId; + + private long replySeq; + private long replyAck; + private int replyMax; + + private int encodeSlot = NO_SLOT; + private int encodeSlotOffset; + private long encodeSlotTraceId; + + private int decodeSlot = NO_SLOT; + private int decodeSlotOffset; + private int decodeSlotReserved; + + private int nextResponseId; + + private KafkaOffsetFetchClientDecoder decoder; + private LongLongConsumer encoder; + + KafkaOffsetFetchClient( + long originId, + long routedId, + String groupId, + List topics, + KafkaSaslConfig sasl) + { + super(sasl, originId, routedId); + this.groupId = requireNonNull(groupId); + this.topics = topics; + this.topicPartitions = new Int2ObjectHashMap<>(); + + this.encoder = sasl != null ? 
encodeSaslHandshakeRequest : encodeOffsetFetchRequest; + this.decoder = decodeReject; + } + + private void onNetwork( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onNetworkBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onNetworkData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onNetworkEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onNetworkAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onNetworkReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onNetworkWindow(window); + break; + case SignalFW.TYPE_ID: + final SignalFW signal = signalRO.wrap(buffer, index, index + length); + onNetworkSignal(signal); + break; + default: + break; + } + } + + private void onNetworkBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + authorization = begin.authorization(); + state = KafkaState.openingReply(state); + + doNetworkWindow(traceId, 0L, 0, 0, decodePool.slotCapacity()); + } + + private void onNetworkData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final long budgetId = data.budgetId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + data.reserved(); + authorization = data.authorization(); + + assert replyAck <= replySeq; + + if (replySeq > replyAck + replyMax) + { + cleanupNetwork(traceId); + } + else + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = decodePool.acquire(initialId); + } + + if (decodeSlot == NO_SLOT) + { + 
cleanupNetwork(traceId); + } + else + { + final OctetsFW payload = data.payload(); + int reserved = data.reserved(); + int offset = payload.offset(); + int limit = payload.limit(); + + final MutableDirectBuffer buffer = decodePool.buffer(decodeSlot); + buffer.putBytes(decodeSlotOffset, payload.buffer(), offset, limit - offset); + decodeSlotOffset += limit - offset; + decodeSlotReserved += reserved; + + offset = 0; + limit = decodeSlotOffset; + reserved = decodeSlotReserved; + + decodeNetwork(traceId, authorization, budgetId, reserved, buffer, offset, limit); + } + } + } + + private void onNetworkEnd( + EndFW end) + { + final long traceId = end.traceId(); + + state = KafkaState.closedReply(state); + + cleanupDecodeSlotIfNecessary(); + + if (!isApplicationReplyOpen()) + { + cleanupNetwork(traceId); + } + else if (decodeSlot == NO_SLOT) + { + doApplicationEnd(traceId); + } + } + + private void onNetworkAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedReply(state); + + cleanupNetwork(traceId); + } + + private void onNetworkReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + cleanupNetwork(traceId); + } + + private void onNetworkWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= initialSeq; + assert acknowledge >= initialAck; + assert maximum + acknowledge >= initialMax + initialAck; + + this.initialAck = acknowledge; + this.initialMax = maximum; + this.initialPad = padding; + this.initialBudgetId = budgetId; + + assert initialAck <= initialSeq; + + this.authorization = window.authorization(); + + state = KafkaState.openedInitial(state); + + if (encodeSlot != NO_SLOT) + { 
+ final MutableDirectBuffer buffer = encodePool.buffer(encodeSlot); + final int limit = encodeSlotOffset; + + encodeNetwork(encodeSlotTraceId, authorization, budgetId, buffer, 0, limit); + } + + doEncodeRequestIfNecessary(traceId, budgetId); + } + + private void onNetworkSignal( + SignalFW signal) + { + final long traceId = signal.traceId(); + final int signalId = signal.signalId(); + + if (signalId == SIGNAL_NEXT_REQUEST) + { + doEncodeRequestIfNecessary(traceId, initialBudgetId); + } + } + + private void doNetworkBegin( + long traceId, + long authorization, + long affinity) + { + state = KafkaState.openingInitial(state); + + network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, affinity, EMPTY_EXTENSION); + } + + @Override + protected void doNetworkData( + long traceId, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + if (encodeSlot != NO_SLOT) + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(encodeSlotOffset, buffer, offset, limit - offset); + encodeSlotOffset += limit - offset; + encodeSlotTraceId = traceId; + + buffer = encodeBuffer; + offset = 0; + limit = encodeSlotOffset; + } + + encodeNetwork(traceId, authorization, budgetId, buffer, offset, limit); + } + + private void doNetworkEnd( + long traceId, + long authorization) + { + state = KafkaState.closedInitial(state); + + cleanupEncodeSlotIfNecessary(); + + doEnd(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + } + + private void doNetworkAbortIfNecessary( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doAbort(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + state = KafkaState.closedInitial(state); + } + + cleanupEncodeSlotIfNecessary(); + } + + private void doNetworkResetIfNecessary( + long 
traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_OCTETS); + state = KafkaState.closedReply(state); + } + + cleanupDecodeSlotIfNecessary(); + } + + private void doNetworkWindow( + long traceId, + long budgetId, + int minReplyNoAck, + int minReplyPad, + int minReplyMax) + { + final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck); + + if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state)) + { + replyAck = newReplyAck; + assert replyAck <= replySeq; + + replyMax = minReplyMax; + + state = KafkaState.openedReply(state); + + doWindow(network, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, minReplyPad); + } + } + + private void doEncodeRequestIfNecessary( + long traceId, + long budgetId) + { + if (nextRequestId == nextResponseId) + { + encoder.accept(traceId, budgetId); + } + } + + private void doEncodeOffsetFetchRequest( + long traceId, + long budgetId) + { + if (KafkaConfiguration.DEBUG) + { + System.out.format("[client] %s OFFSET FETCH\n", groupId); + } + + final MutableDirectBuffer encodeBuffer = writeBuffer; + final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD; + final int encodeLimit = encodeBuffer.capacity(); + + int encodeProgress = encodeOffset; + + final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .length(0) + .apiKey(OFFSET_FETCH_API_KEY) + .apiVersion(OFFSET_FETCH_API_VERSION) + .correlationId(0) + .clientId((String) null) + .build(); + + encodeProgress = requestHeader.limit(); + + final OffsetFetchRequestFW offsetFetchRequest = + offsetFetchRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .groupId(groupId) + .topicCount(topics.size()) + .build(); + + encodeProgress = offsetFetchRequest.limit(); + + for (KafkaOffsetFetchTopic topic: topics) + { + final OffsetFetchTopicRequestFW 
offsetFetchTopicRequest = + offsetFetchTopicRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .topic(topic.topic) + .partitionsCount(topic.partitions.size()) + .build(); + encodeProgress = offsetFetchTopicRequest.limit(); + + for (Integer partition : topic.partitions) + { + final PartitionIndexFW partitionIndex = + partitionIndexRW.wrap(encodeBuffer, encodeProgress, encodeLimit) + .index(partition) + .build(); + encodeProgress = partitionIndex.limit(); + } + + } + + final int requestId = nextRequestId++; + final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY; + + requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit()) + .length(requestSize) + .apiKey(requestHeader.apiKey()) + .apiVersion(requestHeader.apiVersion()) + .correlationId(requestId) + .clientId(requestHeader.clientId().asString()) + .build(); + + doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress); + + decoder = decodeOffsetFetchResponse; + } + + private void encodeNetwork( + long traceId, + long authorization, + long budgetId, + DirectBuffer buffer, + int offset, + int limit) + { + final int maxLength = limit - offset; + final int initialWin = initialMax - (int)(initialSeq - initialAck); + final int length = Math.max(Math.min(initialWin - initialPad, maxLength), 0); + + if (length > 0) + { + final int reserved = length + initialPad; + + doData(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_EXTENSION); + + initialSeq += reserved; + + assert initialAck <= initialSeq; + } + + final int remaining = maxLength - length; + if (remaining > 0) + { + if (encodeSlot == NO_SLOT) + { + encodeSlot = encodePool.acquire(initialId); + } + + if (encodeSlot == NO_SLOT) + { + cleanupNetwork(traceId); + } + else + { + final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); + encodeBuffer.putBytes(0, buffer, 
offset + length, remaining); + encodeSlotOffset = remaining; + } + } + else + { + cleanupEncodeSlotIfNecessary(); + } + } + + private void decodeNetwork( + long traceId, + long authorization, + long budgetId, + int reserved, + MutableDirectBuffer buffer, + int offset, + int limit) + { + KafkaOffsetFetchClientDecoder previous = null; + int progress = offset; + while (progress <= limit && previous != decoder) + { + previous = decoder; + progress = decoder.decode(this, traceId, authorization, budgetId, reserved, buffer, offset, progress, limit); + } + + if (progress < limit) + { + if (decodeSlot == NO_SLOT) + { + decodeSlot = decodePool.acquire(initialId); + } + + if (decodeSlot == NO_SLOT) + { + cleanupNetwork(traceId); + } + else + { + final MutableDirectBuffer decodeBuffer = decodePool.buffer(decodeSlot); + decodeBuffer.putBytes(0, buffer, progress, limit - progress); + decodeSlotOffset = limit - progress; + decodeSlotReserved = (limit - progress) * reserved / (limit - offset); + } + + doNetworkWindow(traceId, budgetId, decodeSlotOffset, 0, replyMax); + } + else + { + cleanupDecodeSlotIfNecessary(); + + if (KafkaState.replyClosing(state)) + { + doApplicationEnd(traceId); + } + else if (reserved > 0) + { + doNetworkWindow(traceId, budgetId, 0, 0, replyMax); + } + } + } + + @Override + protected void doDecodeSaslHandshakeResponse( + long traceId) + { + decoder = decodeSaslHandshakeResponse; + } + + @Override + protected void doDecodeSaslHandshake( + long traceId) + { + decoder = decodeSaslHandshake; + } + + @Override + protected void doDecodeSaslHandshakeMechanisms( + long traceId) + { + decoder = decodeSaslHandshakeMechanisms; + } + + @Override + protected void doDecodeSaslHandshakeMechansim( + long traceId) + { + decoder = decodeSaslHandshakeMechanism; + } + + @Override + protected void doDecodeSaslAuthenticateResponse( + long traceId) + { + decoder = decodeSaslAuthenticateResponse; + } + + @Override + protected void doDecodeSaslAuthenticate( + long traceId) + { + 
decoder = decodeSaslAuthenticate; + } + + @Override + protected void onDecodeSaslHandshakeResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + client.encoder = client.encodeSaslAuthenticateRequest; + client.decoder = decodeSaslAuthenticateResponse; + break; + default: + cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + @Override + protected void onDecodeSaslAuthenticateResponse( + long traceId, + long authorization, + int errorCode) + { + switch (errorCode) + { + case ERROR_NONE: + client.encoder = client.encodeOffsetFetchRequest; + client.decoder = decodeOffsetFetchResponse; + break; + default: + cleanupApplication(traceId, errorCode); + doNetworkEnd(traceId, authorization); + break; + } + } + + @Override + protected void onDecodeSaslResponse( + long traceId) + { + nextResponseId++; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + private void onDecodeOffsetFetchResponse( + long traceId) + { + doApplicationBeginIfNecessary(traceId, authorization); + doApplicationWindow(traceId, 0L, 0, 0, 0); + + final KafkaDataExFW kafkaDataEx = kafkaDataExRW.wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(kafkaTypeId) + .offsetFetch(m -> + m.topic(t -> + t.topic(newTopic) + .offsets(o -> topicPartitions.forEach((k, v) -> + o.item(to -> to + .partitionId(k) + .partitionOffset(v) + ))))) + .build(); + + doApplicationData(traceId, authorization, kafkaDataEx); + + nextResponseId++; + } + + public void onDecodeTopic( + long traceId, + long authorization, + String topic) + { + newTopic = topic; + } + + public void onDecodePartition( + long traceId, + int partitionId, + long offsetCommitted, + int partitionError) + { + if (partitionError == ERROR_NONE) + { + topicPartitions.put(partitionId, (Long) offsetCommitted); + } + } + + private void cleanupNetwork( + long traceId) + { + doNetworkResetIfNecessary(traceId); + 
doNetworkAbortIfNecessary(traceId); + + cleanupApplication(traceId, EMPTY_OCTETS); + } + + private void cleanupDecodeSlotIfNecessary() + { + if (decodeSlot != NO_SLOT) + { + decodePool.release(decodeSlot); + decodeSlot = NO_SLOT; + decodeSlotOffset = 0; + decodeSlotReserved = 0; + } + } + + private void cleanupEncodeSlotIfNecessary() + { + if (encodeSlot != NO_SLOT) + { + encodePool.release(encodeSlot); + encodeSlot = NO_SLOT; + encodeSlotOffset = 0; + encodeSlotTraceId = 0; + } + } + } + } + + +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java index 702792edc7..c9383a6581 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java @@ -34,8 +34,10 @@ import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; import org.agrona.collections.Int2IntHashMap; +import org.agrona.collections.Int2ObjectHashMap; import org.agrona.collections.Long2LongHashMap; import org.agrona.collections.MutableInteger; +import org.agrona.collections.MutableReference; import org.agrona.concurrent.UnsafeBuffer; import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding; @@ -71,6 +73,8 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.FlushFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerAssignmentFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerDataExFW; import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDescribeDataExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFetchDataExFW; @@ -81,6 +85,7 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaMergedFlushExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaMetaDataExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaTopicPartitionFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; import io.aklivity.zilla.runtime.engine.EngineContext; @@ -142,6 +147,7 @@ public final class KafkaMergedFactory implements BindingHandler private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder(); private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder(); private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder(); + private final KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder(); private final MutableInteger partitionCount = new MutableInteger(); private final MutableInteger initialNoAckRW = new MutableInteger(); @@ -958,7 +964,8 @@ private void doReset( long acknowledge, int maximum, long traceId, - long authorization) + long authorization, + Consumer extension) { final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) .originId(originId) @@ -969,6 +976,7 @@ private void doReset( .maximum(maximum) .traceId(traceId) .authorization(authorization) + .extension(extension) .build(); sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); @@ -1001,6 +1009,8 @@ private final class KafkaMergedStream private final KafkaUnmergedMetaStream metaStream; private final List 
fetchStreams; private final List produceStreams; + private final Int2ObjectHashMap consumers; + private final Int2IntHashMap leadersByAssignedId; private final Int2IntHashMap leadersByPartitionId; private final Long2LongHashMap latestOffsetByPartitionId; private final Long2LongHashMap stableOffsetByPartitionId; @@ -1035,7 +1045,11 @@ private final class KafkaMergedStream private int fetchStreamIndex; private long mergedReplyBudgetId = NO_CREDITOR_INDEX; + private KafkaUnmergedConsumerStream consumerStream; private KafkaUnmergedProduceStream producer; + private String groupId; + private String consumerId; + private int timeout; KafkaMergedStream( MessageConsumer sender, @@ -1067,7 +1081,9 @@ private final class KafkaMergedStream this.metaStream = new KafkaUnmergedMetaStream(this); this.fetchStreams = new ArrayList<>(); this.produceStreams = new ArrayList<>(); + this.consumers = new Int2ObjectHashMap<>(); this.leadersByPartitionId = new Int2IntHashMap(-1); + this.leadersByAssignedId = new Int2IntHashMap(-1); this.latestOffsetByPartitionId = new Long2LongHashMap(-3); this.stableOffsetByPartitionId = new Long2LongHashMap(-3); this.nextOffsetsById = initialOffsetsById; @@ -1148,6 +1164,14 @@ private void onMergedInitialBegin( this.maximumOffset = asMaximumOffset(mergedBeginEx.partitions()); this.filters = asMergedFilters(filters); this.evaluation = mergedBeginEx.evaluation(); + this.groupId = mergedBeginEx.groupId().asString(); + this.consumerId = mergedBeginEx.consumerId().asString(); + this.timeout = mergedBeginEx.timeout(); + + if (groupId != null && !groupId.isEmpty()) + { + this.consumerStream = new KafkaUnmergedConsumerStream(this); + } describeStream.doDescribeInitialBegin(traceId); } @@ -1179,6 +1203,7 @@ private void onMergedInitialData( final int flags = data.flags(); final OctetsFW payload = data.payload(); final OctetsFW extension = data.extension(); + MutableReference consumerId = new MutableReference<>(); if (producer == null) { @@ -1198,17 +1223,31 @@ 
private void onMergedInitialData( final int nextPartitionId = partitionId == DYNAMIC_PARTITION ? nextPartitionData(hashKey, key) : partitionId; final KafkaUnmergedProduceStream newProducer = findProducePartitionLeader(nextPartitionId); - assert newProducer != null; // TODO this.producer = newProducer; - } - assert producer != null; + if (this.producer == null) + { + consumerId.set(consumers.get(nextPartitionId)); + } + } - producer.doProduceInitialData(traceId, reserved, flags, budgetId, payload, extension); + if (this.producer != null) + { + producer.doProduceInitialData(traceId, reserved, flags, budgetId, payload, extension); - if ((flags & FLAGS_FIN) != FLAGS_NONE) + if ((flags & FLAGS_FIN) != FLAGS_NONE) + { + this.producer = null; + } + } + else { - this.producer = null; + doMergedInitialReset(traceId, ex -> ex.set((b, o, l) -> kafkaResetExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .error(0) + .consumerId(consumerId.get()) + .build() + .sizeof())); } } } @@ -1224,7 +1263,7 @@ private int nextPartitionData( KafkaKeyFW hashKey, KafkaKeyFW key) { - final int partitionCount = leadersByPartitionId.size(); + final int partitionCount = leadersByAssignedId.size(); final int keyHash = hashKey.length() != -1 ? defaultKeyHash(hashKey) : key.length() != -1 ? defaultKeyHash(key) : nextNullKeyHashData++; @@ -1236,7 +1275,7 @@ private int nextPartitionData( private int nextPartitionFlush( KafkaKeyFW key) { - final int partitionCount = leadersByPartitionId.size(); + final int partitionCount = leadersByAssignedId.size(); final int keyHash = key.length() != -1 ? defaultKeyHash(key) : nextNullKeyHashFlush++; final int partitionId = partitionCount > 0 ? 
(0x7fff_ffff & keyHash) % partitionCount : 0; @@ -1295,18 +1334,18 @@ private void onMergedInitialFlush( assert kafkaFlushEx != null; assert kafkaFlushEx.kind() == KafkaFlushExFW.KIND_MERGED; final KafkaMergedFlushExFW kafkaMergedFlushEx = kafkaFlushEx.merged(); - final KafkaCapabilities newCapabilities = kafkaMergedFlushEx.capabilities().get(); - final Array32FW filters = kafkaMergedFlushEx.filters(); + final KafkaCapabilities newCapabilities = kafkaMergedFlushEx.fetch().capabilities().get(); + final Array32FW filters = kafkaMergedFlushEx.fetch().filters(); final List newFilters = asMergedFilters(filters); if (capabilities != newCapabilities) { - this.maximumOffset = asMaximumOffset(kafkaMergedFlushEx.progress()); + this.maximumOffset = asMaximumOffset(kafkaMergedFlushEx.fetch().progress()); if (hasFetchCapability(newCapabilities) && !hasFetchCapability(capabilities)) { final Long2LongHashMap initialOffsetsById = new Long2LongHashMap(-3L); - kafkaMergedFlushEx.progress().forEach(p -> + kafkaMergedFlushEx.fetch().progress().forEach(p -> { final long partitionId = p.partitionId(); if (partitionId >= 0L) @@ -1330,8 +1369,8 @@ private void onMergedInitialFlush( { if (hasProduceCapability(capabilities)) { - final KafkaOffsetFW partition = kafkaMergedFlushEx.partition(); - final KafkaKeyFW key = kafkaMergedFlushEx.key(); + final KafkaOffsetFW partition = kafkaMergedFlushEx.fetch().partition(); + final KafkaKeyFW key = kafkaMergedFlushEx.fetch().key(); if (partition != null) { final int partitionId = partition.partitionId(); @@ -1347,7 +1386,7 @@ private void onMergedInitialFlush( if (hasFetchCapability(capabilities) && !newFilters.equals(this.filters)) { this.filters = newFilters; - final int partitionCount = leadersByPartitionId.size(); + final int partitionCount = leadersByAssignedId.size(); for (int partitionId = 0; partitionId < partitionCount; partitionId++) { doFetchInitialFlush(traceId, partitionId); @@ -1624,13 +1663,14 @@ private void doMergedInitialWindow( } 
private void doMergedInitialReset( - long traceId) + long traceId, + Consumer extension) { assert !KafkaState.initialClosed(state); state = KafkaState.closedInitial(state); doReset(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization); + traceId, authorization, extension); } private void doMergedReplyEndIfNecessary( @@ -1675,12 +1715,12 @@ private void doMergedReplyFlush( { final KafkaFlushExFW kafkaFlushExFW = kafkaFlushExRW.wrap(extBuffer, 0, extBuffer.capacity()) .typeId(kafkaTypeId) - .merged(f -> f - .progress(ps -> nextOffsetsById.longForEach((p, o) -> - ps.item(i -> i.partitionId((int) p) - .partitionOffset(o) - .stableOffset(initialStableOffsetsById.get(p)) - .latestOffset(initialLatestOffsetsById.get(p)))))) + .merged(ff -> ff + .fetch(f -> f.progress(ps -> nextOffsetsById.longForEach((p, o) -> + ps.item(i -> i.partitionId((int) p) + .partitionOffset(o) + .stableOffset(initialStableOffsetsById.get(p)) + .latestOffset(initialLatestOffsetsById.get(p))))))) .build(); doFlush(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, @@ -1729,7 +1769,7 @@ private void doMergedInitialResetIfNecessary( cleanupBudgetCreditorIfNecessary(); if (fetchStreams.isEmpty()) { - doMergedInitialReset(traceId); + doMergedInitialReset(traceId, EMPTY_EXTENSION); } } } @@ -1765,7 +1805,44 @@ private void onTopicMetaDataChanged( partitions.forEach(p -> leadersByPartitionId.put(p.partitionId(), p.leaderId())); partitionCount.value = 0; partitions.forEach(partition -> partitionCount.value++); - assert leadersByPartitionId.size() == partitionCount.value; + + if (this.consumerStream != null) + { + this.consumerStream.doConsumerInitialBeginIfNecessary(traceId); + } + else + { + leadersByAssignedId.clear(); + partitions.forEach(p -> leadersByAssignedId.put(p.partitionId(), p.leaderId())); + assert leadersByAssignedId.size() == partitionCount.value; + + doFetchPartitionsIfNecessary(traceId); + 
doProducePartitionsIfNecessary(traceId); + } + } + + private void onTopicConsumerDataChanged( + long traceId, + Array32FW partitions, + Array32FW newAssignments) + { + leadersByAssignedId.clear(); + partitions.forEach(p -> + { + int partitionId = p.partitionId(); + int leaderId = leadersByPartitionId.get(partitionId); + leadersByAssignedId.put(partitionId, leaderId); + }); + + consumers.clear(); + newAssignments.forEach(a -> + { + a.partitions().forEach(p -> + { + final String consumerId = a.consumerId().asString(); + consumers.put(p.partitionId(), consumerId); + }); + }); doFetchPartitionsIfNecessary(traceId); doProducePartitionsIfNecessary(traceId); @@ -1776,19 +1853,14 @@ private void doFetchPartitionsIfNecessary( { if (hasFetchCapability(capabilities)) { - final int partitionCount = leadersByPartitionId.size(); - for (int partitionId = 0; partitionId < partitionCount; partitionId++) - { - doFetchPartitionIfNecessary(traceId, partitionId); - } - assert fetchStreams.size() >= leadersByPartitionId.size(); + leadersByAssignedId.forEach((k, v) -> doFetchPartitionIfNecessary(traceId, k)); + assert fetchStreams.size() >= leadersByAssignedId.size(); - int offsetCount = nextOffsetsById.size(); - for (int partitionId = partitionCount; partitionId < offsetCount; partitionId++) - { - nextOffsetsById.remove(partitionId); - } - assert nextOffsetsById.size() <= leadersByPartitionId.size(); + nextOffsetsById.entrySet() + .removeIf( + entry -> !leadersByAssignedId.containsKey(entry.getKey().intValue())); + + assert nextOffsetsById.size() <= leadersByAssignedId.size(); } } @@ -1797,12 +1869,8 @@ private void doProducePartitionsIfNecessary( { if (hasProduceCapability(capabilities)) { - final int partitionCount = leadersByPartitionId.size(); - for (int partitionId = 0; partitionId < partitionCount; partitionId++) - { - doProducePartitionIfNecessary(traceId, partitionId); - } - assert produceStreams.size() >= leadersByPartitionId.size(); + leadersByAssignedId.forEach((k, v) -> 
doProducePartitionIfNecessary(traceId, k)); + assert produceStreams.size() >= leadersByAssignedId.size(); } } @@ -1810,7 +1878,7 @@ private void doFetchPartitionIfNecessary( long traceId, int partitionId) { - final int leaderId = leadersByPartitionId.get(partitionId); + final int leaderId = leadersByAssignedId.get(partitionId); final long partitionOffset = nextFetchPartitionOffset(partitionId); KafkaUnmergedFetchStream leader = findFetchPartitionLeader(partitionId); @@ -1941,7 +2009,7 @@ private void doProducePartitionIfNecessary( long traceId, int partitionId) { - final int leaderId = leadersByPartitionId.get(partitionId); + final int leaderId = leadersByAssignedId.get(partitionId); KafkaUnmergedProduceStream leader = findProducePartitionLeader(partitionId); @@ -1967,7 +2035,7 @@ private void onProducePartitionLeaderReady( long traceId, long partitionId) { - if (produceStreams.size() == leadersByPartitionId.size()) + if (produceStreams.size() == leadersByAssignedId.size()) { if (!KafkaState.initialOpened(state)) { @@ -1993,7 +2061,7 @@ private void onProducePartitionLeaderError( final KafkaUnmergedProduceStream leader = findProducePartitionLeader(partitionId); assert leader != null; - if (leadersByPartitionId.containsKey(partitionId)) + if (leadersByAssignedId.containsKey(partitionId)) { leader.doProduceInitialBegin(traceId); } @@ -2279,7 +2347,7 @@ private void doDescribeReplyReset( state = KafkaState.closedReply(state); doReset(receiver, merged.routedId, merged.resolvedId, replyId, replySeq, replyAck, replyMax, - traceId, merged.authorization); + traceId, merged.authorization, EMPTY_EXTENSION); } } @@ -2535,7 +2603,271 @@ private void doMetaReplyReset( state = KafkaState.closedReply(state); doReset(receiver, merged.routedId, merged.resolvedId, replyId, replySeq, replyAck, replyMax, - traceId, merged.authorization); + traceId, merged.authorization, EMPTY_EXTENSION); + } + } + + private final class KafkaUnmergedConsumerStream + { + private final KafkaMergedStream 
merged; + + private long initialId; + private long replyId; + private MessageConsumer receiver = NO_RECEIVER; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + + private long replySeq; + private long replyAck; + private int replyMax; + + private KafkaUnmergedConsumerStream( + KafkaMergedStream merged) + { + this.merged = merged; + } + + private void doConsumerInitialBeginIfNecessary( + long traceId) + { + if (!KafkaState.initialOpening(state)) + { + doConsumerInitialBegin(traceId); + } + } + + private void doConsumerInitialBegin( + long traceId) + { + assert state == 0; + + state = KafkaState.openingInitial(state); + + this.initialId = supplyInitialId.applyAsLong(merged.resolvedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + this.receiver = newStream(this::onConsumerReply, + merged.routedId, merged.resolvedId, initialId, initialSeq, initialAck, initialMax, + traceId, merged.authorization, 0L, + ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .consumer(c -> c + .groupId(merged.groupId) + .consumerId(merged.consumerId) + .timeout(merged.timeout) + .topic(merged.topic) + .partitionIds(p -> merged.leadersByPartitionId.forEach((k, v) -> + p.item(tp -> tp.partitionId(k)))) + ) + .build() + .sizeof())); + } + + private void doConsumerInitialEndIfNecessary( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doConsumerInitialEnd(traceId); + } + } + + private void doConsumerInitialEnd( + long traceId) + { + state = KafkaState.closedInitial(state); + + doEnd(receiver, merged.routedId, merged.resolvedId, initialId, initialSeq, initialAck, initialMax, + traceId, merged.authorization, EMPTY_EXTENSION); + } + + private void doConsumerInitialAbortIfNecessary( + long traceId) + { + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + { + doConsumerInitialAbort(traceId); + } + } + + private void doConsumerInitialAbort( + long traceId) + { + 
state = KafkaState.closedInitial(state); + + doAbort(receiver, merged.routedId, merged.resolvedId, initialId, initialSeq, initialAck, initialMax, + traceId, merged.authorization, EMPTY_EXTENSION); + } + + private void onConsumerReply( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onConsumerReplyBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onConsumerReplyData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onConsumerReplyEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onConsumerReplyAbort(abort); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onConsumerInitialReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onConsumerInitialWindow(window); + break; + default: + break; + } + } + + private void onConsumerReplyBegin( + BeginFW begin) + { + final long traceId = begin.traceId(); + + state = KafkaState.openingReply(state); + + doConsumerReplyWindow(traceId, 0, 8192); + } + + private void onConsumerReplyData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final int reserved = data.reserved(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + + if (replySeq > replyAck + replyMax) + { + merged.doMergedCleanup(traceId); + } + else + { + final KafkaDataExFW kafkaDataEx = extension.get(kafkaDataExRO::wrap); + final KafkaConsumerDataExFW kafkaConsumerDataEx = kafkaDataEx.consumer(); + final 
Array32FW partitions = kafkaConsumerDataEx.partitions(); + final Array32FW assignments = kafkaConsumerDataEx.assignments(); + merged.onTopicConsumerDataChanged(traceId, partitions, assignments); + + doConsumerReplyWindow(traceId, 0, replyMax); + } + } + + private void onConsumerReplyEnd( + EndFW end) + { + final long traceId = end.traceId(); + + state = KafkaState.closedReply(state); + + merged.doMergedReplyBeginIfNecessary(traceId); + merged.doMergedReplyEndIfNecessary(traceId); + + doConsumerInitialEndIfNecessary(traceId); + } + + private void onConsumerReplyAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + state = KafkaState.closedReply(state); + + merged.doMergedReplyAbortIfNecessary(traceId); + + doConsumerInitialAbortIfNecessary(traceId); + } + + private void onConsumerInitialReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + + state = KafkaState.closedInitial(state); + + merged.doMergedInitialResetIfNecessary(traceId); + + doConsumerReplyResetIfNecessary(traceId); + } + + private void onConsumerInitialWindow( + WindowFW window) + { + if (!KafkaState.initialOpened(state)) + { + final long traceId = window.traceId(); + + state = KafkaState.openedInitial(state); + + merged.doMergedInitialWindow(traceId, 0L); + } + } + + private void doConsumerReplyWindow( + long traceId, + int minReplyNoAck, + int minReplyMax) + { + final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck); + + if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state)) + { + replyAck = newReplyAck; + assert replyAck <= replySeq; + + replyMax = minReplyMax; + + state = KafkaState.openedReply(state); + + doWindow(receiver, merged.routedId, merged.resolvedId, replyId, replySeq, replyAck, replyMax, + traceId, merged.authorization, 0L, merged.replyPad, DEFAULT_MINIMUM); + } + } + + private void doConsumerReplyResetIfNecessary( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + 
doConsumerReplyReset(traceId); + } + } + + private void doConsumerReplyReset( + long traceId) + { + state = KafkaState.closedReply(state); + + doReset(receiver, merged.routedId, merged.resolvedId, replyId, replySeq, replyAck, replyMax, + traceId, merged.authorization, EMPTY_EXTENSION); } } @@ -2925,7 +3257,7 @@ private void doFetchReplyReset( state = KafkaState.closedReply(state); doReset(receiver, merged.routedId, merged.resolvedId, replyId, replySeq, replyAck, replyMax, - traceId, merged.authorization); + traceId, merged.authorization, EMPTY_EXTENSION); } private void setFetchFilter( @@ -3093,8 +3425,8 @@ private void doProduceInitialFlush( .typeId(kafkaTypeId) .produce(c -> { - c.partition(kafkaMergedFlushEx.partition()); - c.key(kafkaMergedFlushEx.key()); + c.partition(kafkaMergedFlushEx.fetch().partition()); + c.key(kafkaMergedFlushEx.fetch().key()); }) .build(); @@ -3334,7 +3666,7 @@ private void doProduceReplyReset( state = KafkaState.closedReply(state); doReset(receiver, merged.routedId, merged.resolvedId, replyId, replySeq, replyAck, replyMax, - traceId, merged.authorization); + traceId, merged.authorization, EMPTY_EXTENSION); } } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaOffsetFetchTopic.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaOffsetFetchTopic.java new file mode 100644 index 0000000000..f0fbc5f4ec --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaOffsetFetchTopic.java @@ -0,0 +1,32 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import java.util.List; + +public final class KafkaOffsetFetchTopic +{ + final String topic; + List partitions; + + KafkaOffsetFetchTopic( + String topic, + List partitions) + { + this.topic = topic; + this.partitions = partitions; + } +} diff --git a/runtime/binding-kafka/src/main/zilla/protocol.idl b/runtime/binding-kafka/src/main/zilla/protocol.idl index c789a6b03a..41461651fc 100644 --- a/runtime/binding-kafka/src/main/zilla/protocol.idl +++ b/runtime/binding-kafka/src/main/zilla/protocol.idl @@ -416,15 +416,8 @@ scope protocol struct Assignment { string16 memberId; - uint32 length; - octets[length] value; - } - - struct TopicPartition - { - int32 version; - string16 topic; - int32 partitionCount; + int32 length; + octets[length] value = null; } struct Partition @@ -477,22 +470,35 @@ scope protocol string16 groupInstanceId = null; } - struct TopicPartition + struct OffsetFetchRequest { - int32 partitionId; + string16 groupId; + int32 topicCount; } - struct ConsumerAssignment + struct OffsetFetchTopicRequest { - string16 consumerId; - TopicPartition[] partitions; + string16 topic; + int32 partitionsCount; } - struct MemberAssignment + struct OffsetFetchResponse { - string16 topic; - TopicPartition[] partitions; - octets userdata; + int32 topicCount; + } + + struct OffsetFetchTopicResponse + { + string16 name; + int32 partitionCount; + } + + struct OffsetFetchPartition + { + int32 partitionIndex; + int64 committedOffset; + string16 metadata = null; + int16 errorCode; } } diff 
--git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java new file mode 100644 index 0000000000..9740c276bd --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java @@ -0,0 +1,64 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.ScriptProperty; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class CacheConsumerIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/consumer"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(15, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("app1") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + @Test + @Configuration("cache.when.topic.yaml") + @Specification({ + "${app}/partition.assignment/client", + "${app}/partition.assignment/server" + }) + @ScriptProperty("serverAddress \"zilla://streams/app1\"") + public void shouldAssignPartition() throws Exception + { + k3po.finish(); + } + +} diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java index 49985db410..ded800e0db 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java +++ 
b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java @@ -585,4 +585,14 @@ public void shouldReceiveMessagesWithHeadersSkipManyFilter() throws Exception { k3po.finish(); } + + @Test + @Configuration("cache.options.merged.yaml") + @Specification({ + "${app}/merged.group.fetch.message.value/client", + "${app}/unmerged.group.fetch.message.value/server"}) + public void shouldFetchGroupMessageValue() throws Exception + { + k3po.finish(); + } } diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetFetchIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetFetchIT.java new file mode 100644 index 0000000000..12ebdf2fe7 --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetFetchIT.java @@ -0,0 +1,68 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration.KAFKA_CACHE_SERVER_BOOTSTRAP; +import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration.KAFKA_CACHE_SERVER_RECONNECT_DELAY; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_BUFFER_SLOT_CAPACITY; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.ScriptProperty; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class CacheOffsetFetchIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(10, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) + .configure(KAFKA_CACHE_SERVER_BOOTSTRAP, false) + .configure(KAFKA_CACHE_SERVER_RECONNECT_DELAY, 0) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("app1") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + @Test + @Configuration("cache.yaml") + @Specification({ + "${app}/partition.offset/client", + "${app}/partition.offset/server"}) + @ScriptProperty("serverAddress \"zilla://streams/app1\"") + public void shouldFetchPartitionOffset() 
throws Exception + { + k3po.finish(); + } +} diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientConsumerIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientConsumerIT.java new file mode 100644 index 0000000000..e7b552c39d --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientConsumerIT.java @@ -0,0 +1,64 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.ScriptProperty; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class ClientConsumerIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/application/group") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/consumer"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(15, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/partition.assignment/client", + "${net}/partition.assignment/server"}) + @ScriptProperty("serverAddress \"zilla://streams/net0\"") + public void shouldAssignGroupPartition() throws Exception + { + k3po.finish(); + } +} diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java index 937bbc57a7..f4bdf04b0e 100644 --- 
a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java @@ -65,7 +65,6 @@ public void shouldHandleClientSentWriteAbortBeforeCoordinatorResponse() throws E @Specification({ "${app}/rebalance.protocol.highlander/client", "${net}/rebalance.protocol.highlander/server"}) - public void shouldLeaveGroupOnGroupRebalanceError() throws Exception { k3po.finish(); @@ -124,7 +123,7 @@ public void shouldRejectSecondStreamOnUnknownProtocol() throws Exception @Test @Configuration("client.yaml") @Specification({ - "${app}/leader/client", + "${app}/rebalance.sync.group/client", "${net}/rebalance.sync.group/server"}) public void shouldHandleRebalanceSyncGroup() throws Exception { diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientOffsetFetchIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientOffsetFetchIT.java new file mode 100644 index 0000000000..8e410691a9 --- /dev/null +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientOffsetFetchIT.java @@ -0,0 +1,62 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class ClientOffsetFetchIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(15, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/partition.offset/client", + "${net}/topic.offset.info/server"}) + public void shouldFetchPartitionLastCommittedOffset() throws Exception + { + k3po.finish(); + } +} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/client.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/client.rpt index e57f947461..9e463519a3 100644 --- 
a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/client.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/client.rpt @@ -134,6 +134,7 @@ read ${grpc:protobuf() read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/server.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/server.rpt index 67c95b6b5d..689d60bb74 100644 --- a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/server.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/bidi.stream.rpc/server.rpt @@ -132,6 +132,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/client.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/client.rpt index 9b14d76b8b..70238172e0 100644 --- a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/client.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/client.rpt @@ -131,6 +131,7 @@ read zilla:data.null read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) 
.merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/server.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/server.rpt index b5fcc11706..20a82598b7 100644 --- a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/server.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/client.stream.rpc/server.rpt @@ -130,6 +130,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/client.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/client.rpt index f9f4c96f5b..7924467f0b 100644 --- a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/client.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/client.rpt @@ -115,6 +115,7 @@ read ${grpc:protobuf() read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/server.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/server.rpt index 06f67ddf4c..4de87c778e 100644 --- 
a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/server.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/server.stream.rpc/server.rpt @@ -115,6 +115,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/client.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/client.rpt index 3267fdb0c9..7ce3c0b948 100644 --- a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/client.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/client.rpt @@ -113,6 +113,7 @@ read zilla:data.null read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/server.rpt b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/server.rpt index 07b295000f..f847566d05 100644 --- a/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/server.rpt +++ b/specs/binding-grpc-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/kafka/streams/kafka/produce/unary.rpc/server.rpt @@ -114,6 +114,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git 
a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/client.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/client.rpt index f731d79b04..1bc6584da2 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/client.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/client.rpt @@ -51,6 +51,7 @@ read '{ "name": "widget" }' read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/server.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/server.rpt index dd908087ae..9dd9f5881c 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/server.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.modified/server.rpt @@ -55,6 +55,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/client.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/client.rpt index b3dae39e06..82283db856 100644 --- 
a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/client.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/client.rpt @@ -50,6 +50,7 @@ read '{ "name": "widget" }' read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/server.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/server.rpt index 382be07ace..39541da663 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/server.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.item.no.etag.modified/server.rpt @@ -54,6 +54,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/client.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/client.rpt index a798987ec5..c8b4ac2fde 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/client.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/client.rpt @@ -70,6 +70,7 @@ read '{ "name": "gizmo" }' read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + 
.fetch() .progress(0, 2, 2, 2) .progress(1, 1, 1, 1) .build() diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/server.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/server.rpt index 2930ecee42..565fe37115 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/server.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.modified/server.rpt @@ -76,6 +76,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .progress(1, 1, 1, 1) .build() diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/client.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/client.rpt index 520e43e31b..55da055eb3 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/client.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/client.rpt @@ -73,6 +73,7 @@ read '{ "name": "gizmo" }' read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 1, 1, 1) .progress(1, 1, 1, 1) .build() diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/server.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/server.rpt index 9a6aeebc37..34e925d118 100644 --- 
a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/server.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items.write.flush/server.rpt @@ -79,6 +79,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 1, 1, 1) .progress(1, 1, 1, 1) .build() diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/client.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/client.rpt index 257a5d614d..7b75e00edc 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/client.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/client.rpt @@ -71,6 +71,7 @@ read '{ "name": "gizmo" }' read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 1, 1, 1) .progress(1, 1, 1, 1) .build() diff --git a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/server.rpt b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/server.rpt index 9b30da2f32..1591d0a2da 100644 --- a/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/server.rpt +++ b/specs/binding-http-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/http/kafka/streams/kafka/get.items/server.rpt @@ -77,6 +77,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 1, 1, 1) .progress(1, 1, 1, 1) .build() diff --git 
a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java index e2b77ea581..da9f248554 100644 --- a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java +++ b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java @@ -59,9 +59,12 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.OctetsFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.String16FW; import io.aklivity.zilla.specs.binding.kafka.internal.types.String8FW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.rebalance.MemberAssignmentFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.rebalance.TopicAssignmentFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaApi; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaBootstrapBeginExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaConsumerAssignmentFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaConsumerBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaConsumerDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaDataExFW; @@ -72,10 +75,13 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFetchFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; -import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupFlushExFW; +import 
io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupMemberFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupMemberMetadataFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedBeginExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedConsumerFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedDataExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedFetchFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMetaBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMetaDataExFW; @@ -87,6 +93,7 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaProduceDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaProduceFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaTopicPartitionFW; public final class KafkaFunctions { @@ -276,6 +283,24 @@ public static byte[] varint( } } + @Function + public static KafkaGroupMemberMetadataBuilder memberMetadata() + { + return new KafkaGroupMemberMetadataBuilder(); + } + + @Function + public static MemberAssignmentsBuilder memberAssignment() + { + return new MemberAssignmentsBuilder(); + } + + @Function + public static TopicAssignmentsBuilder topicAssignment() + { + return new TopicAssignmentsBuilder(); + } + public abstract static class KafkaHeadersBuilder { private final KafkaHeadersFW.Builder headersRW = new KafkaHeadersFW.Builder(); @@ -555,6 +580,123 @@ private void set( } } + public static final class KafkaGroupMemberMetadataBuilder + { + private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); + + private final 
KafkaGroupMemberMetadataFW.Builder groupMemberMetadataRW = + new KafkaGroupMemberMetadataFW.Builder(); + + public KafkaGroupMemberMetadataBuilder() + { + groupMemberMetadataRW.wrap(writeBuffer, 0, writeBuffer.capacity()); + } + + public KafkaGroupMemberMetadataBuilder consumerId( + String consumerId) + { + groupMemberMetadataRW.consumerId(consumerId); + return this; + } + + public KafkaGroupMemberMetadataBuilder topic( + String topic, + int partitionId) + { + groupMemberMetadataRW.topics(t -> + t.item(tp -> tp.topic(topic) + .partitions(p -> p.item(i -> i.partitionId(partitionId))))); + return this; + } + + public byte[] build() + { + final KafkaGroupMemberMetadataFW metadata = groupMemberMetadataRW.build(); + final byte[] array = new byte[metadata.sizeof()]; + metadata.buffer().getBytes(metadata.offset(), array); + return array; + } + } + + public static final class MemberAssignmentsBuilder + { + private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); + + private final Array32FW.Builder memberAssignments = + new Array32FW.Builder(new MemberAssignmentFW.Builder(), new MemberAssignmentFW()); + + public MemberAssignmentsBuilder() + { + memberAssignments.wrap(writeBuffer, 0, writeBuffer.capacity()); + } + + public MemberAssignmentsBuilder member( + String memberId, + String topic, + int partitionId, + String consumerId, + int consumerPartitionId) + { + memberAssignments.item(ma -> + ma.memberId(memberId) + .assignments(ta -> ta.item(i -> + i.topic(topic) + .partitions(p -> p.item(tpa -> tpa.partitionId(partitionId))) + .userdata(u -> + u.item(ud -> ud + .consumerId(consumerId) + .partitions(pt -> pt.item(pi -> pi.partitionId(consumerPartitionId))))) + ))); + return this; + } + + public byte[] build() + { + Array32FW members = memberAssignments.build(); + final byte[] array = new byte[members.sizeof()]; + members.buffer().getBytes(members.offset(), array); + return array; + } + } + + public static final class TopicAssignmentsBuilder + { + 
private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); + + private final Array32FW.Builder topicAssignments = + new Array32FW.Builder(new TopicAssignmentFW.Builder(), new TopicAssignmentFW()); + + public TopicAssignmentsBuilder() + { + topicAssignments.wrap(writeBuffer, 0, writeBuffer.capacity()); + } + + public TopicAssignmentsBuilder topic( + String topic, + int partitionId, + String consumerId, + int consumerPartitionId) + { + topicAssignments.item(i -> + i.topic(topic) + .partitions(p -> p.item(tpa -> tpa.partitionId(partitionId))) + .userdata(u -> + u.item(ud -> ud + .consumerId(consumerId) + .partitions(pt -> pt.item(pi -> pi.partitionId(consumerPartitionId))))) + ); + return this; + } + + public byte[] build() + { + Array32FW topics = topicAssignments.build(); + final byte[] array = new byte[topics.sizeof()]; + topics.buffer().getBytes(topics.offset(), array); + return array; + } + } + public static final class KafkaBeginExBuilder { private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); @@ -714,6 +856,13 @@ public KafkaMergedBeginExBuilder consumerId( return this; } + public KafkaMergedBeginExBuilder timeout( + int timeout) + { + mergedBeginExRW.timeout(timeout); + return this; + } + public KafkaMergedBeginExBuilder partition( int partitionId, long offset) @@ -1048,6 +1197,13 @@ public KafkaGroupBeginExBuilder timeout( return this; } + public KafkaGroupBeginExBuilder metadata( + byte[] metadata) + { + groupBeginExRW.metadataLen(metadata.length).metadata(m -> m.set(metadata)); + return this; + } + public KafkaBeginExBuilder build() { final KafkaGroupBeginExFW groupBeginEx = groupBeginExRW.build(); @@ -1059,11 +1215,15 @@ public KafkaBeginExBuilder build() public final class KafkaConsumerBeginExBuilder { private final KafkaConsumerBeginExFW.Builder consumerBeginExRW = new KafkaConsumerBeginExFW.Builder(); + private final MutableDirectBuffer partitionBuffer = new UnsafeBuffer(new byte[1024 * 8]); + 
private final Array32FW.Builder partitionsRW = + new Array32FW.Builder<>(new KafkaTopicPartitionFW.Builder(), new KafkaTopicPartitionFW()); private KafkaConsumerBeginExBuilder() { consumerBeginExRW.wrap(writeBuffer, KafkaBeginExFW.FIELD_OFFSET_CONSUMER, writeBuffer.capacity()); + partitionsRW.wrap(partitionBuffer, 0, partitionBuffer.capacity()); } public KafkaConsumerBeginExBuilder groupId( @@ -1073,6 +1233,20 @@ public KafkaConsumerBeginExBuilder groupId( return this; } + public KafkaConsumerBeginExBuilder consumerId( + String consumerId) + { + consumerBeginExRW.consumerId(consumerId); + return this; + } + + public KafkaConsumerBeginExBuilder timeout( + int timeout) + { + consumerBeginExRW.timeout(timeout); + return this; + } + public KafkaConsumerBeginExBuilder topic( String topic) { @@ -1083,12 +1257,13 @@ public KafkaConsumerBeginExBuilder topic( public KafkaConsumerBeginExBuilder partition( int partitionId) { - consumerBeginExRW.partitionIds(p -> p.item(i -> i.partitionId(partitionId))); + partitionsRW.item(i -> i.partitionId(partitionId)); return this; } public KafkaBeginExBuilder build() { + consumerBeginExRW.partitionIds(partitionsRW.build()); final KafkaConsumerBeginExFW consumerBeginEx = consumerBeginExRW.build(); beginExRO.wrap(writeBuffer, 0, consumerBeginEx.limit()); return KafkaBeginExBuilder.this; @@ -1218,13 +1393,6 @@ public KafkaProduceDataExBuilder produce() return new KafkaProduceDataExBuilder(); } - public KafkaGroupDataExBuilder group() - { - dataExRW.kind(KafkaApi.GROUP.value()); - - return new KafkaGroupDataExBuilder(); - } - public KafkaConsumerDataExBuilder consumer() { dataExRW.kind(KafkaApi.CONSUMER.value()); @@ -1761,62 +1929,47 @@ public KafkaDataExBuilder build() } } - public final class KafkaGroupDataExBuilder + public final class KafkaConsumerDataExBuilder { - private final KafkaGroupDataExFW.Builder groupDataExRW = new KafkaGroupDataExFW.Builder(); + private final KafkaConsumerDataExFW.Builder consumerDataExRW = new 
KafkaConsumerDataExFW.Builder(); - private KafkaGroupDataExBuilder() - { - groupDataExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_GROUP, writeBuffer.capacity()); - } + private final MutableDirectBuffer partitionBuffer = new UnsafeBuffer(new byte[1024 * 8]); + private final MutableDirectBuffer assignmentBuffer = new UnsafeBuffer(new byte[1024 * 8]); + private final Array32FW.Builder partitionsRW = + new Array32FW.Builder<>(new KafkaTopicPartitionFW.Builder(), new KafkaTopicPartitionFW()); - public KafkaGroupDataExBuilder leaderId( - String leaderId) - { - groupDataExRW.leaderId(leaderId); - return this; - } + private final Array32FW.Builder assignmentsRW = + new Array32FW.Builder<>(new KafkaConsumerAssignmentFW.Builder(), new KafkaConsumerAssignmentFW()); - public KafkaGroupDataExBuilder memberId( - String memberId) + private KafkaConsumerDataExBuilder() { - groupDataExRW.memberId(memberId); - return this; + consumerDataExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_CONSUMER, writeBuffer.capacity()); + partitionsRW.wrap(partitionBuffer, 0, partitionBuffer.capacity()); + assignmentsRW.wrap(assignmentBuffer, 0, assignmentBuffer.capacity()); } - public KafkaGroupDataExBuilder members( - int members) + public KafkaConsumerDataExBuilder partition( + int partitionId) { - groupDataExRW.members(members); + partitionsRW.item(i -> i.partitionId(partitionId)); return this; } - public KafkaDataExBuilder build() - { - final KafkaGroupDataExFW groupDataEx = groupDataExRW.build(); - dataExRO.wrap(writeBuffer, 0, groupDataEx.limit()); - return KafkaDataExBuilder.this; - } - } - - public final class KafkaConsumerDataExBuilder - { - private final KafkaConsumerDataExFW.Builder consumerDataExRW = new KafkaConsumerDataExFW.Builder(); - - private KafkaConsumerDataExBuilder() - { - consumerDataExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_GROUP, writeBuffer.capacity()); - } - - public KafkaConsumerDataExBuilder partition( + public KafkaConsumerDataExBuilder assignment( + String 
consumerId, int partitionId) { - consumerDataExRW.partitions(p -> p.item(i -> i.partitionId(partitionId))); + assignmentsRW.item(i -> i + .consumerId(consumerId) + .partitions(p -> p.item(tp -> tp.partitionId(partitionId)))); + return this; } public KafkaDataExBuilder build() { + consumerDataExRW.partitions(partitionsRW.build()); + consumerDataExRW.assignments(assignmentsRW.build()); final KafkaConsumerDataExFW consumerDataEx = consumerDataExRW.build(); dataExRO.wrap(writeBuffer, 0, consumerDataEx.limit()); return KafkaDataExBuilder.this; @@ -1835,12 +1988,11 @@ private KafkaOffsetFetchDataExBuilder() public KafkaOffsetFetchDataExBuilder topic( String topic, int partitionId, - long stableOffset, - long latestOffset) + long offset) { offsetFetchDataExRW.topic(t -> t.topic(topic).offsets(o -> o.item(i -> - i.partitionId(partitionId).stableOffset(stableOffset).latestOffset(latestOffset)))); + i.partitionId(partitionId).partitionOffset(offset)))); return this; } @@ -1947,105 +2099,182 @@ public final class KafkaMergedFlushExBuilder private KafkaMergedFlushExBuilder() { - mergedFlushExRW.wrap(writeBuffer, KafkaFlushExFW.FIELD_OFFSET_FETCH, writeBuffer.capacity()); + mergedFlushExRW.wrap(writeBuffer, KafkaFlushExFW.FIELD_OFFSET_MERGED, writeBuffer.capacity()); } - public KafkaMergedFlushExBuilder progress( - int partitionId, - long offset) + public KafkaMergedFetchFlushExBuilder fetch() { - progress(partitionId, offset, DEFAULT_LATEST_OFFSET); - return this; + mergedFlushExRW.kind(KafkaApi.FETCH.value()); + + return new KafkaMergedFetchFlushExBuilder(); } - public KafkaMergedFlushExBuilder progress( - int partitionId, - long offset, - long latestOffset) + public KafkaMergedConsumerFlushExBuilder consumer() { - mergedFlushExRW.progressItem(p -> p.partitionId(partitionId).partitionOffset(offset).latestOffset(latestOffset)); - return this; + mergedFlushExRW.kind(KafkaApi.CONSUMER.value()); + + return new KafkaMergedConsumerFlushExBuilder(); + } + + public 
KafkaFlushExBuilder build() + { + final KafkaMergedFlushExFW mergedFlushEx = mergedFlushExRW.build(); + flushExRO.wrap(writeBuffer, 0, mergedFlushEx.limit()); + return KafkaFlushExBuilder.this; } - public KafkaMergedFlushExBuilder progress( + public final class KafkaMergedFetchFlushExBuilder + { + private final KafkaMergedFetchFlushExFW.Builder mergedFetchFlushExRW = new KafkaMergedFetchFlushExFW.Builder(); + + private KafkaMergedFetchFlushExBuilder() + { + mergedFetchFlushExRW.wrap(writeBuffer, + KafkaFlushExFW.FIELD_OFFSET_MERGED + KafkaMergedFlushExFW.FIELD_OFFSET_FETCH, + writeBuffer.capacity()); + } + + public KafkaMergedFetchFlushExBuilder progress( + int partitionId, + long offset) + { + progress(partitionId, offset, DEFAULT_LATEST_OFFSET); + return this; + } + + public KafkaMergedFetchFlushExBuilder progress( + int partitionId, + long offset, + long latestOffset) + { + mergedFetchFlushExRW.progressItem(p -> + p.partitionId(partitionId) + .partitionOffset(offset) + .latestOffset(latestOffset)); + return this; + } + + public KafkaMergedFetchFlushExBuilder progress( int partitionId, long offset, long stableOffset, long latestOffset) - { - mergedFlushExRW.progressItem(p -> p + { + mergedFetchFlushExRW.progressItem(p -> p .partitionId(partitionId) .partitionOffset(offset) .stableOffset(stableOffset) .latestOffset(latestOffset)); - return this; - } + return this; + } - public KafkaMergedFlushExBuilder capabilities( - String capabilities) - { - mergedFlushExRW.capabilities(c -> c.set(KafkaCapabilities.valueOf(capabilities))); - return this; - } + public KafkaMergedFetchFlushExBuilder capabilities( + String capabilities) + { + mergedFetchFlushExRW.capabilities(c -> c.set(KafkaCapabilities.valueOf(capabilities))); + return this; + } - public KafkaFilterBuilder filter() - { - return new KafkaFilterBuilder<>() + public KafkaFilterBuilder filter() { + return new KafkaFilterBuilder<>() + { - @Override - protected KafkaMergedFlushExBuilder build( - KafkaFilterFW filter) 
+ @Override + protected KafkaMergedFetchFlushExBuilder build( + KafkaFilterFW filter) + { + mergedFetchFlushExRW.filtersItem(fb -> set(fb, filter)); + return KafkaFlushExBuilder.KafkaMergedFlushExBuilder.KafkaMergedFetchFlushExBuilder.this; + } + }; + } + + public KafkaMergedFetchFlushExBuilder partition( + int partitionId, + long partitionOffset) + { + partition(partitionId, partitionOffset, DEFAULT_LATEST_OFFSET); + return this; + } + + public KafkaMergedFetchFlushExBuilder partition( + int partitionId, + long partitionOffset, + long latestOffset) + { + mergedFetchFlushExRW.partition(p -> p + .partitionId(partitionId) + .partitionOffset(partitionOffset) + .latestOffset(latestOffset)); + return this; + } + + + public KafkaMergedFetchFlushExBuilder key( + String key) + { + if (key == null) { - mergedFlushExRW.filtersItem(fb -> set(fb, filter)); - return KafkaMergedFlushExBuilder.this; + mergedFetchFlushExRW.key(m -> m.length(-1) + .value((OctetsFW) null)); } - }; - } + else + { + keyRO.wrap(key.getBytes(UTF_8)); + mergedFetchFlushExRW.key(k -> k.length(keyRO.capacity()) + .value(keyRO, 0, keyRO.capacity())); + } + return this; + } - public KafkaMergedFlushExBuilder partition( - int partitionId, - long partitionOffset) - { - partition(partitionId, partitionOffset, DEFAULT_LATEST_OFFSET); - return this; + public KafkaFlushExBuilder build() + { + final KafkaMergedFetchFlushExFW mergedFetchFlushEx = mergedFetchFlushExRW.build(); + flushExRO.wrap(writeBuffer, 0, mergedFetchFlushExRW.limit()); + return KafkaFlushExBuilder.this; + } } - public KafkaMergedFlushExBuilder partition( - int partitionId, - long partitionOffset, - long latestOffset) + public final class KafkaMergedConsumerFlushExBuilder { - mergedFlushExRW.partition(p -> p - .partitionId(partitionId) - .partitionOffset(partitionOffset) - .latestOffset(latestOffset)); - return this; - } + private final KafkaMergedConsumerFlushExFW.Builder mergedConsumerFlushExRW = + new KafkaMergedConsumerFlushExFW.Builder(); + 
private KafkaMergedConsumerFlushExBuilder() + { + mergedConsumerFlushExRW.wrap(writeBuffer, + KafkaFlushExFW.FIELD_OFFSET_MERGED + KafkaMergedFlushExFW.FIELD_OFFSET_CONSUMER, + writeBuffer.capacity()); + } - public KafkaMergedFlushExBuilder key( - String key) - { - if (key == null) + public KafkaMergedConsumerFlushExBuilder partition( + int partitionId, + long partitionOffset) { - mergedFlushExRW.key(m -> m.length(-1) - .value((OctetsFW) null)); + partition(partitionId, partitionOffset, DEFAULT_LATEST_OFFSET); + return this; } - else + + public KafkaMergedConsumerFlushExBuilder partition( + int partitionId, + long partitionOffset, + long latestOffset) { - keyRO.wrap(key.getBytes(UTF_8)); - mergedFlushExRW.key(k -> k.length(keyRO.capacity()) - .value(keyRO, 0, keyRO.capacity())); + mergedConsumerFlushExRW.partition(p -> p + .partitionId(partitionId) + .partitionOffset(partitionOffset) + .latestOffset(latestOffset)); + return this; } - return this; - } - public KafkaFlushExBuilder build() - { - final KafkaMergedFlushExFW mergedFlushEx = mergedFlushExRW.build(); - flushExRO.wrap(writeBuffer, 0, mergedFlushEx.limit()); - return KafkaFlushExBuilder.this; + public KafkaFlushExBuilder build() + { + final KafkaMergedConsumerFlushExFW mergedConsumerFlushEx = mergedConsumerFlushExRW.build(); + flushExRO.wrap(writeBuffer, 0, mergedConsumerFlushExRW.limit()); + return KafkaFlushExBuilder.this; + } } } @@ -2082,9 +2311,9 @@ public KafkaFetchFlushExBuilder partition( long latestOffset) { fetchFlushExRW.partition(p -> p.partitionId(partitionId) - .partitionOffset(offset) - .stableOffset(stableOffset) - .latestOffset(latestOffset)); + .partitionOffset(offset) + .stableOffset(stableOffset) + .latestOffset(latestOffset)); return this; } @@ -2177,46 +2406,52 @@ public KafkaFlushExBuilder build() public final class KafkaGroupFlushExBuilder { - private final KafkaGroupFlushExFW.Builder groupFlushExRW = new KafkaGroupFlushExFW.Builder(); + private final MutableDirectBuffer memberBuffer 
= new UnsafeBuffer(new byte[1024 * 8]); + private final KafkaGroupFlushExFW.Builder flushGroupExRW = new KafkaGroupFlushExFW.Builder(); + private final Array32FW.Builder memberRW = + new Array32FW.Builder<>(new KafkaGroupMemberFW.Builder(), new KafkaGroupMemberFW()); private KafkaGroupFlushExBuilder() { - groupFlushExRW.wrap(writeBuffer, KafkaFlushExFW.FIELD_OFFSET_FETCH, writeBuffer.capacity()); + flushGroupExRW.wrap(writeBuffer, KafkaFlushExFW.FIELD_OFFSET_GROUP, writeBuffer.capacity()); + memberRW.wrap(memberBuffer, 0, memberBuffer.capacity()); } - public KafkaGroupFlushExBuilder partition( - int partitionId, - long partitionOffset) + public KafkaGroupFlushExBuilder leaderId( + String leaderId) { - partition(partitionId, partitionOffset, DEFAULT_LATEST_OFFSET); + flushGroupExRW.leaderId(leaderId); return this; } - public KafkaGroupFlushExBuilder partition( - int partitionId, - long partitionOffset, - long latestOffset) + public KafkaGroupFlushExBuilder memberId( + String memberId) { - partition(partitionId, partitionOffset, latestOffset, latestOffset); + flushGroupExRW.memberId(memberId); return this; } - public KafkaGroupFlushExBuilder partition( - int partitionId, - long offset, - long stableOffset, - long latestOffset) + public KafkaGroupFlushExBuilder members( + String memberId, + byte[] metadata) { - groupFlushExRW.partition(p -> p.partitionId(partitionId) - .partitionOffset(offset) - .stableOffset(stableOffset) - .latestOffset(latestOffset)); + memberRW.item(gm -> gm.id(memberId) + .metadataLen(metadata.length) + .metadata(md -> md.set(metadata))); + return this; + } + + public KafkaGroupFlushExBuilder members( + String memberId) + { + memberRW.item(gm -> gm.id(memberId)); return this; } public KafkaFlushExBuilder build() { - final KafkaGroupFlushExFW groupFlushEx = groupFlushExRW.build(); + flushGroupExRW.members(memberRW.build()); + final KafkaGroupFlushExFW groupFlushEx = flushGroupExRW.build(); flushExRO.wrap(writeBuffer, 0, groupFlushEx.limit()); 
return KafkaFlushExBuilder.this; } @@ -2308,15 +2543,6 @@ public KafkaProduceDataExMatcherBuilder produce() return matcherBuilder; } - public KafkaGroupDataExMatchBuilder group() - { - final KafkaGroupDataExMatchBuilder matcherBuilder = new KafkaGroupDataExMatchBuilder(); - - this.kind = KafkaApi.GROUP.value(); - this.caseMatcher = matcherBuilder::match; - return matcherBuilder; - } - public KafkaDataExMatcherBuilder typeId( int typeId) { @@ -3086,70 +3312,6 @@ private boolean matchFilters( return filters == null || filters == mergedDataEx.filters(); } } - - public final class KafkaGroupDataExMatchBuilder - { - private String16FW leaderId; - private String16FW memberId; - private Integer members; - - private KafkaGroupDataExMatchBuilder() - { - } - - public KafkaGroupDataExMatchBuilder leaderId( - String leaderId) - { - this.leaderId = new String16FW(leaderId); - return this; - } - - public KafkaGroupDataExMatchBuilder memberId( - String memberId) - { - this.memberId = new String16FW(memberId); - return this; - } - - public KafkaGroupDataExMatchBuilder members( - int members) - { - this.members = Integer.valueOf(members); - return this; - } - - public KafkaDataExMatcherBuilder build() - { - return KafkaDataExMatcherBuilder.this; - } - - private boolean match( - KafkaDataExFW dataEx) - { - final KafkaGroupDataExFW groupDataEx = dataEx.group(); - return matchLeaderId(groupDataEx) && - matchMemberId(groupDataEx) && - matchmembers(groupDataEx); - } - - private boolean matchLeaderId( - final KafkaGroupDataExFW groupDataEx) - { - return leaderId == null || leaderId.equals(groupDataEx.leaderId()); - } - - private boolean matchMemberId( - final KafkaGroupDataExFW groupDataEx) - { - return memberId == null || memberId.equals(groupDataEx.memberId()); - } - - private boolean matchmembers( - final KafkaGroupDataExFW groupDataEx) - { - return members != null && members == groupDataEx.members(); - } - } } public static final class KafkaFlushExMatcherBuilder @@ -3190,6 +3352,15 
@@ public KafkaProduceFlushExMatcherBuilder produce() return matcherBuilder; } + public KafkaGroupFlushExMatchBuilder group() + { + final KafkaGroupFlushExMatchBuilder matcherBuilder = new KafkaGroupFlushExMatchBuilder(); + + this.kind = KafkaApi.GROUP.value(); + this.caseMatcher = matcherBuilder::match; + return matcherBuilder; + } + public KafkaFlushExMatcherBuilder typeId( int typeId) { @@ -3313,7 +3484,6 @@ public KafkaFilterBuilder() { - @Override protected KafkaFlushExMatcherBuilder.KafkaFetchFlushExMatcherBuilder build( KafkaFilterFW filter) @@ -3355,177 +3525,208 @@ private boolean matchFilters( { return filtersRW == null || filtersRW.build().equals(fetchFlushEx.filters()); } - } public final class KafkaMergedFlushExMatcherBuilder { - private Array32FW.Builder progressRW; - private KafkaKeyFW.Builder keyRW; - private KafkaOffsetFW.Builder partitionRW; - private KafkaCapabilities capabilities; - - private Array32FW.Builder filtersRW; + KafkaMergedFetchFlushEx mergedFetchFlush; private KafkaMergedFlushExMatcherBuilder() { } - public KafkaMergedFlushExMatcherBuilder capabilities( - String capabilities) + public boolean match( + KafkaFlushExFW kafkaFlushEx) { - this.capabilities = KafkaCapabilities.valueOf(capabilities); - return this; + boolean matched = false; + if (kafkaFlushEx.merged().kind() == KafkaApi.FETCH.value()) + { + matched = fetch().match(kafkaFlushEx); + } + return matched; } - public KafkaMergedFlushExMatcherBuilder progress( - int partitionId, - long offset) + public KafkaMergedFetchFlushEx fetch() { - progress(partitionId, offset, DEFAULT_LATEST_OFFSET); - return this; + if (mergedFetchFlush == null) + { + mergedFetchFlush = new KafkaMergedFetchFlushEx(); + } + return mergedFetchFlush; } - public KafkaMergedFlushExMatcherBuilder progress( - int partitionId, - long offset, - long latestOffset) + public final class KafkaMergedFetchFlushEx { - if (progressRW == null) + private Array32FW.Builder progressRW; + private KafkaKeyFW.Builder keyRW; + 
private KafkaOffsetFW.Builder partitionRW; + private KafkaCapabilities capabilities; + + private Array32FW.Builder filtersRW; + + private KafkaMergedFetchFlushEx() { - this.progressRW = new Array32FW.Builder<>(new KafkaOffsetFW.Builder(), new KafkaOffsetFW()) - .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); } - progressRW.item(i -> i.partitionId(partitionId).partitionOffset(offset).latestOffset(latestOffset)); - return this; - } - public KafkaMergedFlushExMatcherBuilder progress( - int partitionId, - long offset, - long stableOffset, - long latestOffset) - { - if (progressRW == null) + + public KafkaMergedFetchFlushEx capabilities( + String capabilities) { - this.progressRW = new Array32FW.Builder<>(new KafkaOffsetFW.Builder(), new KafkaOffsetFW()) + this.capabilities = KafkaCapabilities.valueOf(capabilities); + return this; + } + + public KafkaMergedFetchFlushEx progress( + int partitionId, + long offset) + { + progress(partitionId, offset, DEFAULT_LATEST_OFFSET); + return this; + } + + public KafkaMergedFetchFlushEx progress( + int partitionId, + long offset, + long latestOffset) + { + if (progressRW == null) + { + this.progressRW = new Array32FW.Builder<>(new KafkaOffsetFW.Builder(), new KafkaOffsetFW()) .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + } + progressRW.item(i -> i.partitionId(partitionId).partitionOffset(offset).latestOffset(latestOffset)); + return this; } - progressRW.item(i -> i + + public KafkaMergedFetchFlushEx progress( + int partitionId, + long offset, + long stableOffset, + long latestOffset) + { + if (progressRW == null) + { + this.progressRW = new Array32FW.Builder<>(new KafkaOffsetFW.Builder(), new KafkaOffsetFW()) + .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + } + progressRW.item(i -> i .partitionId(partitionId) .partitionOffset(offset) .stableOffset(stableOffset) .latestOffset(latestOffset)); - return this; - } - - public KafkaMergedFlushExMatcherBuilder partition( - int partitionId, - long offset) - { - 
partition(partitionId, offset, DEFAULT_LATEST_OFFSET); - return this; - } - - public KafkaMergedFlushExMatcherBuilder partition( - int partitionId, - long offset, - long latestOffset) - { - assert partitionRW == null; - partitionRW = new KafkaOffsetFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + return this; + } - partitionRW.partitionId(partitionId).partitionOffset(offset).latestOffset(latestOffset); + public KafkaMergedFetchFlushEx partition( + int partitionId, + long offset) + { + partition(partitionId, offset, DEFAULT_LATEST_OFFSET); + return this; + } - return this; - } + public KafkaMergedFetchFlushEx partition( + int partitionId, + long offset, + long latestOffset) + { + assert partitionRW == null; + partitionRW = new KafkaOffsetFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); - public KafkaMergedFlushExMatcherBuilder key( - String key) - { - assert keyRW == null; - keyRW = new KafkaKeyFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + partitionRW.partitionId(partitionId).partitionOffset(offset).latestOffset(latestOffset); - if (key == null) - { - keyRW.length(-1) - .value((OctetsFW) null); + return this; } - else + + public KafkaMergedFetchFlushEx key( + String key) { - keyRO.wrap(key.getBytes(UTF_8)); - keyRW.length(keyRO.capacity()) - .value(keyRO, 0, keyRO.capacity()); - } + assert keyRW == null; + keyRW = new KafkaKeyFW.Builder().wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); - return this; - } + if (key == null) + { + keyRW.length(-1) + .value((OctetsFW) null); + } + else + { + keyRO.wrap(key.getBytes(UTF_8)); + keyRW.length(keyRO.capacity()) + .value(keyRO, 0, keyRO.capacity()); + } - public KafkaFilterBuilder filter() - { - if (filtersRW == null) - { - filtersRW = new Array32FW.Builder<>(new KafkaFilterFW.Builder(), new KafkaFilterFW()) - .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + return this; } - return new KafkaFilterBuilder<>() + public KafkaFilterBuilder + + filter() { - - @Override - protected 
KafkaMergedFlushExMatcherBuilder build( - KafkaFilterFW filter) + if (filtersRW == null) { - filtersRW.item(fb -> set(fb, filter)); - return KafkaMergedFlushExMatcherBuilder.this; + filtersRW = new Array32FW.Builder<>(new KafkaFilterFW.Builder(), new KafkaFilterFW()) + .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); } - }; - } - public KafkaFlushExMatcherBuilder build() - { - return KafkaFlushExMatcherBuilder.this; - } + return new KafkaFilterBuilder<>() + { + @Override + protected KafkaFlushExMatcherBuilder.KafkaMergedFlushExMatcherBuilder.KafkaMergedFetchFlushEx + build( + KafkaFilterFW filter) + { + filtersRW.item(fb -> set(fb, filter)); + return KafkaMergedFetchFlushEx.this; + } + }; + } - private boolean match( - KafkaFlushExFW flushEx) - { - final KafkaMergedFlushExFW mergedFlushEx = flushEx.merged(); - return matchCapabilities(mergedFlushEx) && - matchProgress(mergedFlushEx) && - matchKey(mergedFlushEx) && - matchPartition(mergedFlushEx) && - matchFilters(mergedFlushEx); - } + public KafkaFlushExMatcherBuilder build() + { + return KafkaFlushExMatcherBuilder.this; + } - private boolean matchCapabilities( - final KafkaMergedFlushExFW mergedFlushEx) - { - return capabilities == null || capabilities.equals(mergedFlushEx.capabilities().get()); - } + private boolean match( + KafkaFlushExFW flushEx) + { + final KafkaMergedFetchFlushExFW mergedFlushEx = flushEx.merged().fetch(); + return matchCapabilities(mergedFlushEx) && + matchProgress(mergedFlushEx) && + matchKey(mergedFlushEx) && + matchPartition(mergedFlushEx) && + matchFilters(mergedFlushEx); + } - private boolean matchProgress( - final KafkaMergedFlushExFW mergedFlushEx) - { - return progressRW == null || progressRW.build().equals(mergedFlushEx.progress()); - } + private boolean matchCapabilities( + final KafkaMergedFetchFlushExFW mergedFlushEx) + { + return capabilities == null || capabilities.equals(mergedFlushEx.capabilities().get()); + } - private boolean matchPartition( - final KafkaMergedFlushExFW 
mergedFlushEx) - { - return partitionRW == null || partitionRW.build().equals(mergedFlushEx.partition()); - } + private boolean matchProgress( + final KafkaMergedFetchFlushExFW mergedFlush) + { + return progressRW == null || progressRW.build().equals(mergedFlush.progress()); + } - private boolean matchKey( - final KafkaMergedFlushExFW mergedFlushEx) - { - return keyRW == null || keyRW.build().equals(mergedFlushEx.key()); - } + private boolean matchPartition( + final KafkaMergedFetchFlushExFW mergedFlush) + { + return partitionRW == null || partitionRW.build().equals(mergedFlush.partition()); + } - private boolean matchFilters( - final KafkaMergedFlushExFW mergedFlushEx) - { - return filtersRW == null || filtersRW.build().equals(mergedFlushEx.filters()); + private boolean matchKey( + final KafkaMergedFetchFlushExFW mergedFlush) + { + return keyRW == null || keyRW.build().equals(mergedFlush.key()); + } + + private boolean matchFilters( + final KafkaMergedFetchFlushExFW mergedFlush) + { + return filtersRW == null || filtersRW.build().equals(mergedFlush.filters()); + } } } @@ -3605,6 +3806,89 @@ private boolean matchKey( return keyRW == null || keyRW.build().equals(produceFlushEx.key()); } } + + public final class KafkaGroupFlushExMatchBuilder + { + private String16FW leaderId; + private String16FW memberId; + private Array32FW.Builder membersRW; + + private KafkaGroupFlushExMatchBuilder() + { + } + + public KafkaGroupFlushExMatchBuilder leaderId( + String leaderId) + { + this.leaderId = new String16FW(leaderId); + return this; + } + + public KafkaGroupFlushExMatchBuilder memberId( + String memberId) + { + this.memberId = new String16FW(memberId); + return this; + } + + public KafkaGroupFlushExMatchBuilder members( + String memberId, + String metadata) + { + if (membersRW == null) + { + this.membersRW = new Array32FW.Builder<>(new KafkaGroupMemberFW.Builder(), new KafkaGroupMemberFW()) + .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + } + this.membersRW.item(m -> 
m.id(memberId).metadataLen(metadata.length()) + .metadata(md -> md.set(metadata.getBytes()))); + return this; + } + + public KafkaGroupFlushExMatchBuilder members( + String memberId) + { + if (membersRW == null) + { + this.membersRW = new Array32FW.Builder<>(new KafkaGroupMemberFW.Builder(), new KafkaGroupMemberFW()) + .wrap(new UnsafeBuffer(new byte[1024]), 0, 1024); + } + this.membersRW.item(m -> m.id(memberId)); + return this; + } + + public KafkaFlushExMatcherBuilder build() + { + return KafkaFlushExMatcherBuilder.this; + } + + private boolean match( + KafkaFlushExFW flushEx) + { + final KafkaGroupFlushExFW groupFlushEx = flushEx.group(); + return matchLeaderId(groupFlushEx) && + matchMemberId(groupFlushEx) && + matchMembers(groupFlushEx); + } + + private boolean matchLeaderId( + final KafkaGroupFlushExFW groupFlushEx) + { + return leaderId == null || leaderId.equals(groupFlushEx.leaderId()); + } + + private boolean matchMemberId( + final KafkaGroupFlushExFW groupFlushEx) + { + return memberId == null || memberId.equals(groupFlushEx.memberId()); + } + + private boolean matchMembers( + final KafkaGroupFlushExFW groupFlushEx) + { + return membersRW == null || membersRW.build().equals(groupFlushEx.members()); + } + } } public static final class KafkaBeginExMatcherBuilder @@ -3978,6 +4262,8 @@ public final class KafkaGroupBeginExMatcherBuilder private String16FW protocol; private int timeout; + private byte[] metadata; + private KafkaGroupBeginExMatcherBuilder() { } @@ -4003,6 +4289,13 @@ public KafkaGroupBeginExMatcherBuilder timeout( return this; } + public KafkaGroupBeginExMatcherBuilder metadata( + byte[] metadata) + { + this.metadata = metadata; + return this; + } + public KafkaBeginExMatcherBuilder build() { return KafkaBeginExMatcherBuilder.this; @@ -4015,7 +4308,8 @@ private boolean match( return matchGroupId(groupBeginEx) && matchGroupId(groupBeginEx) && matchProtocol(groupBeginEx) && - matchTimeout(groupBeginEx); + matchTimeout(groupBeginEx) + 
matchMetadata(groupBeginEx); } private boolean matchGroupId( @@ -4035,6 +4329,13 @@ private boolean matchTimeout( { return timeout == 0 || timeout == groupBeginExFW.timeout(); } + + private boolean matchMetadata( + final KafkaGroupBeginExFW groupBeginExFW) + { + OctetsFW metadata = groupBeginExFW.metadata(); + return this.metadata == null || metadata.sizeof() == this.metadata.length; + } } public final class KafkaMergedBeginExMatcherBuilder diff --git a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl index 29b072619f..fb0078d524 100644 --- a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl +++ b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl @@ -195,7 +195,6 @@ scope kafka union KafkaDataEx switch (uint8) extends core::stream::Extension { case 252: kafka::stream::KafkaConsumerDataEx consumer; - case 253: kafka::stream::KafkaGroupDataEx group; case 255: kafka::stream::KafkaMergedDataEx merged; case 3: kafka::stream::KafkaMetaDataEx meta; case 8: kafka::stream::KafkaOffsetCommitDataEx offsetCommit; @@ -230,6 +229,7 @@ scope kafka string16 topic; string16 groupId = null; string16 consumerId = null; + int32 timeout = 0; KafkaOffset[] partitions; KafkaFilter[] filters; // ORed KafkaEvaluation evaluation = LAZY; @@ -251,7 +251,13 @@ scope kafka KafkaHeader[] headers; // INIT + FIN (produce), INIT only (fetch) } - struct KafkaMergedFlushEx + union KafkaMergedFlushEx switch (uint8) + { + case 252: kafka::stream::KafkaMergedConsumerFlushEx consumer; + case 1: kafka::stream::KafkaMergedFetchFlushEx fetch; + } + + struct KafkaMergedFetchFlushEx { KafkaOffset partition; KafkaOffset[] progress; @@ -260,9 +266,9 @@ scope kafka KafkaKey key; } - struct KafkaGroupFlushEx + struct KafkaMergedConsumerFlushEx { - KafkaOffset partition; + KafkaOffset partition; } struct KafkaMetaBeginEx @@ -343,41 +349,71 @@ scope kafka KafkaHeader[] headers; } + struct 
KafkaTopicPartition + { + int32 partitionId; + } + + struct KafkaGroupTopicMetadata + { + string16 topic; + KafkaTopicPartition[] partitions; + } + + struct KafkaGroupMemberMetadata + { + string16 consumerId; + KafkaGroupTopicMetadata[] topics; + } + struct KafkaGroupBeginEx { string16 groupId; string16 protocol; int32 timeout; + varint32 metadataLen; + octets[metadataLen] metadata = null; } - struct KafkaGroupDataEx + struct KafkaGroupMember { - string16 leaderId; - string16 memberId; - int32 members; + string16 id; + varint32 metadataLen; + octets[metadataLen] metadata = null; } - struct TopicPartition + struct KafkaGroupFlushEx { - int32 partitionId; + string16 leaderId; + string16 memberId; + KafkaGroupMember[] members; } struct KafkaConsumerBeginEx { string16 groupId; + string16 consumerId; + int32 timeout; string16 topic; - TopicPartition[] partitionIds; + KafkaTopicPartition[] partitionIds; + } + + struct KafkaConsumerAssignment + { + string16 consumerId; + KafkaTopicPartition[] partitions; } struct KafkaConsumerDataEx { - TopicPartition[] partitions; + KafkaTopicPartition[] partitions; + KafkaConsumerAssignment[] assignments; } struct KafkaOffsetFetchTopic { string16 topic; - TopicPartition[] partitions; + KafkaTopicPartition[] partitions; } struct KafkaOffsetFetchBeginEx @@ -409,4 +445,36 @@ scope kafka int64 partitionOffset; } } + + scope rebalance + { + struct TopicPartition + { + int32 partitionId; + } + + struct ConsumerAssignment + { + string16 consumerId; + TopicPartition[] partitions; + } + + struct TopicAssignment + { + string16 topic; + TopicPartition[] partitions; + ConsumerAssignment[] userdata; + } + + struct MemberAssignment + { + string16 memberId; + TopicAssignment[] assignments; + } + + struct PartitionIndex + { + int32 index; + } + } } diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt new file mode 100644 index 0000000000..900ec59160 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt @@ -0,0 +1,40 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .consumer() + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .topic("test") + .partition(0) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .consumer() + .partition(0) + .assignment("localhost:9092", 0) + .build() + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt new file mode 100644 index 0000000000..17e8dc42c5 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt @@ -0,0 
+1,48 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .consumer() + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .topic("test") + .partition(0) + .build() + .build()} + +connected + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .consumer() + .partition(0) + .assignment("localhost:9092", 0) + .build() + .build()} + +write zilla:data.empty +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt index cb2deade38..cf59d81824 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt @@ -40,13 +40,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() write advise zilla:flush -read zilla:data.ext 
${kafka:matchDataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -read zilla:data.null +write zilla:data.empty +write flush + +read zilla:data.empty diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt index 52b3df2fa1..7076315b97 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt @@ -45,12 +45,16 @@ write flush read advised zilla:flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} + +read zilla:data.empty + +write zilla:data.empty write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt index e1d426f7bc..ee170242ea 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/client.rpt @@ -38,13 +38,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() .build() .build()} -read zilla:data.ext ${kafka:matchDataEx() +read 
advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -read zilla:data.null +write zilla:data.empty +write flush + +read zilla:data.empty diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt index e294b4e29e..73e30d082e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader/server.rpt @@ -43,12 +43,16 @@ write zilla:begin.ext ${kafka:beginEx() .build()} write flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} + +read zilla:data.empty + +write zilla:data.empty write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt new file mode 100644 index 0000000000..282b24d83b --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt @@ -0,0 +1,64 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(45000) + .metadata(kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test", 0) + .build()) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test", 0) + .build()) + .build() + .build()} + +write ${kafka:memberAssignment() + .member("memberId-1", "test", 0, "localhost:9092", 0) + .build()} +write flush + +read ${kafka:topicAssignment() + .topic("test", 0, "localhost:9092", 0) + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt new file mode 100644 index 0000000000..8367f6fc54 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt @@ -0,0 +1,69 @@ +# +# Copyright 2021-2023 
Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(45000) + .metadata(kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test", 0) + .build()) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test", 0) + .build()) + .build() + .build()} + +read ${kafka:memberAssignment() + .member("memberId-1", "test", 0, "localhost:9092", 0) + .build()} + +write ${kafka:topicAssignment() + .topic("test", 0, "localhost:9092", 0) + .build()} +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt index 0ad5b88eb7..be863d5e6f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -38,15 +38,19 @@ read zilla:begin.ext ${kafka:matchBeginEx() .build() .build()} -read zilla:data.ext ${kafka:dataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -read zilla:data.null + +write zilla:data.empty +write flush + +read zilla:data.empty read notify ROUTED_BROKER_SERVER @@ -76,12 +80,16 @@ read zilla:begin.ext ${kafka:matchBeginEx() .build() .build()} -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -read zilla:data.null + +write zilla:data.empty +write flush + +read zilla:data.empty diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt index 612219b422..63f3642b46 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -43,14 +43,18 @@ write zilla:begin.ext ${kafka:beginEx() .build()} write flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} + +read zilla:data.empty + +write zilla:data.empty write flush read abort @@ -78,13 +82,16 @@ write zilla:begin.ext ${kafka:beginEx() .build()} write flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -write flush +read zilla:data.empty + +write zilla:data.empty +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt index 97fe944ccb..d53487e538 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt @@ -38,27 +38,36 @@ read zilla:begin.ext ${kafka:matchBeginEx() .build() .build()} -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -read zilla:data.null + +write zilla:data.empty +write flush + +read zilla:data.empty write advise 
zilla:flush -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(2) + .members("memberId-1") + .members("memberId-2") .build() .build()} -read zilla:data.null + +write zilla:data.empty +write flush + +read zilla:data.empty write close read closed diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt index 3cad0f6c66..ca3b929e93 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt @@ -43,27 +43,35 @@ write zilla:begin.ext ${kafka:beginEx() .build()} write flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} + +read zilla:data.empty + +write zilla:data.empty write flush read advised zilla:flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(2) + .members("memberId-1") + .members("memberId-2") .build() .build()} -write flush + +read zilla:data.empty + +write zilla:data.empty read closed write close diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt index d148031a57..a6a92ea2ad 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt @@ -38,15 +38,19 @@ read zilla:begin.ext ${kafka:beginEx() .build() .build()} -read zilla:data.ext ${kafka:matchDataEx() +read advised zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} -read zilla:data.null + +write zilla:data.empty +write flush + +read zilla:data.empty read notify ROUTED_BROKER_SERVER diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt index b2ffbdad10..09f08d8a9b 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt @@ -43,14 +43,18 @@ write zilla:begin.ext ${kafka:beginEx() .build()} write flush -write zilla:data.ext ${kafka:dataEx() +write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .group() .leaderId("memberId-1") .memberId("memberId-1") - .members(1) + .members("memberId-1") .build() .build()} + +read zilla:data.empty + +write zilla:data.empty write flush rejected diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt new file mode 100644 index 0000000000..6937ab1880 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt @@ -0,0 +1,65 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1") + .build() + .build()} + +write zilla:data.empty +write flush + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1") + .build() + .build()} + +write zilla:data.empty +write flush + +read zilla:data.empty diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt new file mode 100644 index 0000000000..5ca156ca20 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt @@ -0,0 +1,69 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1") + .build() + .build()} + +read zilla:data.empty + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1") + .build() + .build()} + +read zilla:data.empty + +write zilla:data.empty +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/client.rpt index 46b8645895..bee1c37b8a 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/client.rpt @@ -38,6 +38,7 @@ connected read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .progress(1, 2, 2, 2) .build() @@ -57,6 +58,7 @@ read "Hello, world #A5" write advise zilla:flush ${kafka:flushEx() 
.typeId(zilla:id("kafka")) .merged() + .fetch() .capabilities("FETCH_ONLY") .filter() .headers("header3") diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/server.rpt index 131b4661c7..0662c289a2 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.change/server.rpt @@ -43,6 +43,7 @@ connected write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .progress(1, 2, 2, 2) .build() @@ -64,6 +65,7 @@ write flush read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .filter() .headers("header3") .sequence("one") diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/client.rpt index 65797fa317..95bb8dc375 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/client.rpt @@ -46,6 +46,7 @@ read "Hello, world #A1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/server.rpt index e6aef8b810..646925b42c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.header.with.compaction/server.rpt @@ -51,6 +51,7 @@ write "Hello, world #A1" write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} @@ -102,4 +103,4 @@ write zilla:data.ext ${kafka:dataEx() .header("header2", "value2") .build() .build()} -write "Hello, world #A4" \ No newline at end of file +write "Hello, world #A4" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/client.rpt index 979c34e4c9..3f68ef3c90 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/client.rpt @@ -83,6 +83,7 @@ read notify RECEIVED_MESSAGE_B2 read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 1, 2) .progress(1, 3, 1, 2) .build() diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/server.rpt index 0a10a9bbb4..edd7106c28 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none.read.uncommitted/server.rpt @@ -92,6 +92,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 1, 2) .progress(1, 3, 1, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/client.rpt index be72e1a5a0..593b4fc303 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/client.rpt @@ -82,6 +82,7 @@ read notify RECEIVED_MESSAGE_B2 read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 2, 2) .progress(1, 3, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/server.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/server.rpt index 592b71500c..99dc1173f7 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.none/server.rpt @@ -91,6 +91,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 2, 2) .progress(1, 3, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/client.rpt index 2996614512..c178993714 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/client.rpt @@ -43,6 +43,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .progress(1, 2, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/server.rpt index 061d4156ab..bd995750f6 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.filter.sync/server.rpt @@ -49,6 +49,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .progress(1, 2, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/client.rpt index ab1d6d8a79..70956daee5 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/client.rpt @@ -83,6 +83,7 @@ read notify RECEIVED_MESSAGE_B2 read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 2, 2) .progress(1, 3, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/server.rpt index c19fa6f57c..66873f85e8 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.isolation.read.committed/server.rpt @@ -92,6 +92,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) 
.merged() + .fetch() .progress(0, 3, 2, 2) .progress(1, 3, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/client.rpt index dea36332fb..1a4b4e8acb 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/client.rpt @@ -74,6 +74,7 @@ read "Hello, world #B2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 2, 2) .progress(1, 3, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/server.rpt index 6eb28b40cb..9b31a8b5de 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.fetch.message.values/server.rpt @@ -87,6 +87,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 3, 2, 2) .progress(1, 3, 2, 2) .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/client.rpt new file mode 100644 index 0000000000..dc96fdf72f --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/client.rpt @@ -0,0 +1,45 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("test") + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .partition(0, 1) + .partition(1, 1) + .partition(-1, 1) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .partition(0, 1, 2) + .progress(0, 2) + .build() + .build()} +read "Hello, world #A1" + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/server.rpt new file mode 100644 index 0000000000..cadddb5324 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/server.rpt @@ -0,0 +1,51 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("FETCH_ONLY") + .topic("test") + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .partition(0, 1) + .partition(1, 1) + .partition(-1, 1) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(newTimestamp) + .partition(0, 1, 2) + .progress(0, 2) + .build() + .build()} +write "Hello, world #A1" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/client.rpt index f8c85d060b..ffb8003d11 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/client.rpt @@ -46,6 +46,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .key("key7") @@ -77,6 +78,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .key("key7") @@ -108,6 +110,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .key("key9") .capabilities("PRODUCE_ONLY") @@ -129,6 +132,7 @@ write flush write advise zilla:flush 
${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .key("key9") diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/server.rpt index 42d6dc55dd..c5e536ab1f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic.hashed/server.rpt @@ -43,6 +43,7 @@ read "Hello, world #A1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .key("key7") .build() @@ -69,6 +70,7 @@ read "Hello, world #A2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .key("key7") .build() @@ -95,9 +97,10 @@ read "Hello, world #C1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() - .partition(-1, -1) - .key("key9") - .build() + .fetch() + .partition(-1, -1) + .key("key9") + .build() .build()} read zilla:data.ext ${kafka:matchDataEx() @@ -112,7 +115,8 @@ read "Hello, world #C2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() - .partition(-1, -1) - .key("key9") - .build() + .fetch() + .partition(-1, -1) + .key("key9") + .build() .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/client.rpt index 5277c6dc7a..d40c360bc3 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/client.rpt @@ -45,6 +45,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .build() @@ -73,6 +74,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .build() @@ -91,6 +93,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .build() @@ -119,6 +122,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .capabilities("PRODUCE_ONLY") .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/server.rpt index 1eca6de001..67ef157d76 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush.dynamic/server.rpt @@ -42,6 +42,7 @@ read "Hello, world #A1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() 
.partition(-1, -1) .build() .build()} @@ -66,6 +67,7 @@ read "Hi, world #C1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .build() .build()} @@ -81,6 +83,7 @@ read "Hello, world #A2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .build() .build()} @@ -105,6 +108,7 @@ read "Hi, world #C2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(-1, -1) .build() .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/client.rpt index 7731d53481..7f787cba3f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/client.rpt @@ -45,6 +45,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(0, 1) .capabilities("PRODUCE_ONLY") .build() @@ -73,6 +74,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(0, 2) .capabilities("PRODUCE_ONLY") .build() @@ -101,6 +103,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(2, 1) .capabilities("PRODUCE_ONLY") .build() @@ -119,7 +122,8 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(2, 2) .capabilities("PRODUCE_ONLY") .build() - .build()} \ No newline at end of file + .build()} diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/server.rpt index 985213206a..96c3563c19 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.produce.flush/server.rpt @@ -42,6 +42,7 @@ read "Hello, world #A1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(0, 1) .build() .build()} @@ -65,6 +66,7 @@ read "Hello, world #A2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(0, 2) .build() .build()} @@ -88,6 +90,7 @@ read "Hi, world #C1" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(2, 1) .build() .build()} @@ -103,6 +106,7 @@ read "Hi, world #C2" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .partition(2, 2) .build() .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt new file mode 100644 index 0000000000..e408dfe143 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt @@ -0,0 +1,170 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + 
.config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} + +read notify RECEIVED_CONFIG + +connect await RECEIVED_CONFIG + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .partition(1, 2) + .build() + .build()} + + +read notify PARTITION_COUNT_2 + +connect await PARTITION_COUNT_2 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .consumer() + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .topic("test") + .partition(0) + .partition(1) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .consumer() + .partition(0) + .assignment("localhost:9092", 0) + .build() + .build()} + +read notify RECEIVED_CONSUMER + +connect await RECEIVED_CONSUMER + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, -2) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, 1, 2) + .build() + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .fetch() + .partition(0, 1, 2) + .build() + .build()} +read "Hello, world #A1" + + diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt new file mode 100644 index 0000000000..a0f622b99c --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt @@ -0,0 +1,168 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} +write 
flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .partition(1, 2) + .build() + .build()} +write flush + + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .consumer() + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .topic("test") + .partition(0) + .partition(1) + .build() + .build()} + +connected + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .consumer() + .partition(0) + .assignment("localhost:9092", 0) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, -2) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .fetch() + .topic("test") + .partition(0, 1, 2) + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .fetch() + .timestamp(newTimestamp) + .partition(0, 1, 2) + .build() + .build()} +write "Hello, world #A1" +write flush + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/client.rpt new file mode 100644 index 0000000000..e80abbc037 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/client.rpt @@ -0,0 +1,37 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-1") + .topic("test") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .partitionId(0) + .partitionOffset(1) + .build() + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/server.rpt new file mode 100644 index 0000000000..9c7c06b8c2 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.commit/commit.offset/server.rpt @@ -0,0 +1,43 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .groupId("client-1") + .topic("test") + .build() + .build()} + +connected + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetCommit() + .partition(0) + .partition(1) + .build() + .build()} +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/client.rpt new file mode 100644 index 0000000000..ece4511733 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/client.rpt @@ -0,0 +1,36 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-1") + .topic("test", 0) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .topic("test", 0, 1) + .build() + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/server.rpt new file mode 100644 index 0000000000..cd7d3ce9d0 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/partition.offset/server.rpt @@ -0,0 +1,41 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .groupId("client-1") + .topic("test", 0) + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .offsetFetch() + .topic("test", 0, 1) + .build() + .build()} +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt index b31879fae3..f06928bf4b 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt @@ -66,9 +66,51 @@ read 35 # size write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + 
[0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt index c8ee99cba2..1071dc79f8 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt @@ -66,6 +66,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt index 661394f51f..b154dda2f3 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt @@ -49,9 +49,51 @@ read 35 # size write close read abort -read notify ROUTED_BROKER_SERVER_FIRST +read notify ROUTED_CLUSTER_SERVER_FIRST -connect await ROUTED_BROKER_SERVER_FIRST +connect await ROUTED_CLUSTER_SERVER_FIRST + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" @@ -117,9 +159,9 @@ read 35 # size 
write close read abort -read notify ROUTED_BROKER_SERVER_THIRD +read notify ROUTED_CLUSTER_SERVER_SECOND -connect await ROUTED_BROKER_SERVER_THIRD +connect await ROUTED_CLUSTER_SERVER_SECOND "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" @@ -127,6 +169,49 @@ connect await ROUTED_BROKER_SERVER_THIRD connected +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER_SECOND + +connect await ROUTED_DESCRIBE_SERVER_SECOND + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + + +connected + write 105 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt index 0501e11c66..89a6646f74 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 @@ -105,6 +141,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt index 3ebf38ef03..205d723a8e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt @@ -42,16 +42,58 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt index dd66906ce9..e5781e833c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt @@ -38,9 +38,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port read closed write aborted @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt index 4fece6bbfb..3ffa6e2ca3 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -42,16 +42,58 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER_FIRST -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER_FIRST + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER_FIRST + +connect await ROUTED_DESCRIBE_SERVER_FIRST "zilla://streams/net0" option zilla:window ${networkConnectWindow} option 
zilla:transmission "duplex" @@ -131,16 +173,59 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port write close read abort -read notify ROUTED_BROKER_SERVER_THIRD +read notify ROUTED_CLUSTER_SERVER_SECOND -connect await ROUTED_BROKER_SERVER_THIRD "zilla://streams/net0" +connect await ROUTED_CLUSTER_SERVER_SECOND + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER_SECOND + +connect await ROUTED_DESCRIBE_SERVER_SECOND + "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" option zilla:byteorder "network" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt index 62dd9d1a4f..353e127383 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -38,9 +38,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port read closed write aborted @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:newRequestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${newRequestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 @@ -57,7 +93,7 @@ read 105 # size 4s "test" # consumer group 30000 # session timeout 4000 # rebalance timeout - 0s # consumer group member + 0s # consumer group member 42s [0..42] # group instance id 8s "consumer" # protocol type 1 # group protocol @@ -116,9 +152,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port read closed write aborted @@ -127,6 +163,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + 
(int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 115 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt index d395166ffd..094ebb3212 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt @@ -42,16 +42,16 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" @@ -59,7 +59,49 @@ connect await 
ROUTED_BROKER_SERVER connected -write 105 # size +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 105 # size 11s # join group 5s # v5 ${newRequestId} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt index cf3f407a26..614a5cd0b6 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt @@ -38,9 +38,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s 
"localhost" # host + 9092 # port read closed write aborted @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt index 2bfabe1123..ed4521242e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt @@ -42,16 +42,58 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window 
${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt index 099f0a0a68..788be98c4c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt @@ -38,9 +38,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port read closed write aborted @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe 
configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt index c73a41889a..3f523d8122 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt @@ -42,16 +42,58 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 #port + 0 # coordinator node + 9s "localhost" # host + 9092 #port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # 
v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt index 75e53e1bbd..4c1030709a 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt @@ -38,9 +38,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port read closed write aborted @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s 
"group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 102 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt index 6dea7afae8..1e7fa59c00 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt @@ -42,16 +42,58 @@ read 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name 
+ 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt index e3a812d391..90912d4823 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt @@ -38,9 +38,9 @@ write 35 # size 0 # throttle time 0s # no error 4s "none" # error message none - 1 # coordinator node - 9s "localhost" # host - 9092 # port + 0 # coordinator node + 9s "localhost" # host + 9092 # port read closed write aborted @@ -49,6 +49,42 @@ accepted connected +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" 
nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + read 105 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt index 6f082da9a8..54c9960f67 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt @@ -71,16 +71,87 @@ read 35 # size 0 #throttle time 0s #no error 4s "none" #error message none - 1 #coordinator node - 9s "localhost" #host - 9092 #port + 0 #coordinator node + 9s "localhost" #host + 9092 #port write close read abort -read notify ROUTED_BROKER_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_BROKER_SERVER +connect await ROUTED_CLUSTER_SERVER + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 17 # size + 17s # sasl.handshake + 1s # v1 + ${newRequestId} + -1s # no client id + 5s "PLAIN" # mechanism + +read 17 # size + ${newRequestId} + 0s # no error + 1 # mechanisms + 5s "PLAIN" # PLAIN + +write 32 # size + 36s # sasl.authenticate + 1s # v1 + ${newRequestId} + -1s # no client id + 18 + [0x00] "username" # authentication bytes + [0x00] "password" + +read 20 # size + ${newRequestId} + 0s # no error + -1 + 
-1s # authentication bytes + 0L # session lifetime + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER + +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt index 44d6e465b8..3b7326fcfb 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt @@ -67,13 +67,79 @@ write 35 # size 0 #throttle time 0s #no error 4s "none" #error message none - 1 #coordinator node - 9s "localhost" #host - 9092 #port + 0 #coordinator node + 9s "localhost" #host + 9092 #port read closed write aborted +accepted + +connected + +read 17 # size + 17s # sasl.handshake + 1s # v1 + (int:requestId) + -1s # no client id + 5s "PLAIN" # mechanism + +write 17 # size 
+ ${requestId} + 0s # no error + 1 # mechanisms + 5s "PLAIN" # PLAIN + +read 32 # size + 36s # sasl.authenticate + 1s # v1 + (int:requestId) + -1s # no client id + 18 + [0x00] "username" # authentication bytes + [0x00] "password" + +write 20 # size + ${requestId} + 0s # no error + -1 + -1s # authentication bytes + 0L # session lifetime + +read 82 # size + 32s # describe configs + 0s # v0 + (int:requestId) + -1s # no client id + 1 # resources + [0x01] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${requestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x01] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + + accepted connected diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/client.rpt new file mode 100644 index 0000000000..11f8fe8d16 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/client.rpt @@ -0,0 +1,49 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkConnectWindow 8192 + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 38 # size + 9s # offset fetch + 0s # v0 + ${newRequestId} + -1s # no client id + 8s "client-1" # group id + 1 # topics + 4s "test" # "test" topic + 1 # partitions + 0 # partition + +read 30 # size + 1 # topics + 4s "test" # "test" topic + 1 # partitions + 0 # partition index + 1L # committed offset + -1s # metadata + 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/server.rpt new file mode 100644 index 0000000000..fc324de30e --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/server.rpt @@ -0,0 +1,46 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkAcceptWindow 8192 + +accept "zilla://streams/net0" + option zilla:window ${networkAcceptWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 38 # size + 9s # offset fetch + 0s # v0 + (int:newRequestId) + -1s # no client id + 8s "client-1" # group id + 1 # topics + 4s "test" # "test" topic + 1 # partitions + 0 # partition + +write 30 # size + 1 # topics + 4s "test" # "test" topic + 1 # partitions + 0 # partition index + 1L # committed offset + -1s # metadata + 0s # no error diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java index f7d4359c7c..e81ca95da0 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java @@ -57,6 +57,8 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.KafkaTransactionResult; import io.aklivity.zilla.specs.binding.kafka.internal.types.KafkaValueMatchFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.rebalance.MemberAssignmentFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.rebalance.TopicAssignmentFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaApi; import 
io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaBootstrapBeginExFW; @@ -70,8 +72,8 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFetchFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; -import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupFlushExFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupMemberMetadataFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedDataExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedFlushExFW; @@ -99,6 +101,55 @@ public void setUp() throws Exception ctx = new ExpressionContext(); } + @Test + public void shouldGenerateMemberMetadata() + { + byte[] build = KafkaFunctions.memberMetadata() + .consumerId("localhost:9092") + .topic("test", 0) + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaGroupMemberMetadataFW memberMetadata = + new KafkaGroupMemberMetadataFW().wrap(buffer, 0, buffer.capacity()); + + assertEquals("localhost:9092", memberMetadata.consumerId().asString()); + } + + @Test + public void shouldGenerateMemberAssignment() + { + byte[] build = KafkaFunctions.memberAssignment() + .member("memberId-1", "test", 0, "localhost:9092", 0) + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + Array32FW assignments = + new Array32FW<>(new MemberAssignmentFW()).wrap(buffer, 0, buffer.capacity()); + + assignments.forEach(a -> + { + assertEquals("memberId-1", a.memberId().asString()); + }); + } + + @Test + public void shouldGenerateTopicAssignment() + { + byte[] 
build = KafkaFunctions.topicAssignment() + .topic("test", 0, "localhost:9092", 0) + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + Array32FW topics = + new Array32FW<>(new TopicAssignmentFW()).wrap(buffer, 0, buffer.capacity()); + + topics.forEach(t -> + { + assertEquals("test", t.topic().asString()); + }); + } + @Test public void shouldGenerateBootstrapBeginExtension() { @@ -881,11 +932,12 @@ public void shouldGenerateMergedDataExtensionWithNullKeyAndNullByteArrayHeaderVa } @Test - public void shouldGenerateMergedFlushExtension() + public void shouldGenerateMergedFetchFlushExtension() { byte[] build = KafkaFunctions.flushEx() .typeId(0x01) .merged() + .fetch() .partition(1, 2) .capabilities("PRODUCE_AND_FETCH") .progress(0, 1L) @@ -905,26 +957,26 @@ public void shouldGenerateMergedFlushExtension() final KafkaMergedFlushExFW mergedFlushEx = flushEx.merged(); final MutableInteger partitionsCount = new MutableInteger(); - mergedFlushEx.progress().forEach(f -> partitionsCount.value++); + mergedFlushEx.fetch().progress().forEach(f -> partitionsCount.value++); assertEquals(1, partitionsCount.value); - assertNotNull(mergedFlushEx.progress() + assertNotNull(mergedFlushEx.fetch().progress() .matchFirst(p -> p.partitionId() == 0 && p.partitionOffset() == 1L)); - assertEquals(mergedFlushEx.key().value().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)), "key"); - assertEquals(mergedFlushEx.partition().partitionId(), 1); - assertEquals(mergedFlushEx.partition().partitionOffset(), 2); + assertEquals(mergedFlushEx.fetch().key().value().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)), "key"); + assertEquals(mergedFlushEx.fetch().partition().partitionId(), 1); + assertEquals(mergedFlushEx.fetch().partition().partitionOffset(), 2); final MutableInteger filterCount = new MutableInteger(); - mergedFlushEx.filters().forEach(f -> filterCount.value++); + mergedFlushEx.fetch().filters().forEach(f -> filterCount.value++); assertEquals(2, 
filterCount.value); - assertNotNull(mergedFlushEx.filters() + assertNotNull(mergedFlushEx.fetch().filters() .matchFirst(f -> f.conditions() .matchFirst(c -> c.kind() == KEY.value() && "match".equals(c.key() .value() .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)))) != null)); - assertNotNull(mergedFlushEx.filters() + assertNotNull(mergedFlushEx.fetch().filters() .matchFirst(f -> f.conditions() .matchFirst(c -> c.kind() == HEADER.value() && "name".equals(c.header().name() @@ -934,11 +986,12 @@ public void shouldGenerateMergedFlushExtension() } @Test - public void shouldGenerateMergedFlushExtensionWithStableOffset() + public void shouldGenerateMergedFetchFlushExtensionWithStableOffset() { byte[] build = KafkaFunctions.flushEx() .typeId(0x01) .merged() + .fetch() .partition(0, 1L, 1L) .capabilities("PRODUCE_AND_FETCH") .progress(0, 1L, 1L, 1L) @@ -958,29 +1011,29 @@ public void shouldGenerateMergedFlushExtensionWithStableOffset() final KafkaMergedFlushExFW mergedFlushEx = flushEx.merged(); final MutableInteger partitionsCount = new MutableInteger(); - mergedFlushEx.progress().forEach(f -> partitionsCount.value++); + mergedFlushEx.fetch().progress().forEach(f -> partitionsCount.value++); assertEquals(1, partitionsCount.value); - assertEquals(mergedFlushEx.partition().partitionId(), 0); - assertEquals(mergedFlushEx.partition().partitionOffset(), 1L); - assertEquals(mergedFlushEx.partition().latestOffset(), 1L); + assertEquals(mergedFlushEx.fetch().partition().partitionId(), 0); + assertEquals(mergedFlushEx.fetch().partition().partitionOffset(), 1L); + assertEquals(mergedFlushEx.fetch().partition().latestOffset(), 1L); - assertNotNull(mergedFlushEx.progress() + assertNotNull(mergedFlushEx.fetch().progress() .matchFirst(p -> p.partitionId() == 0 && p.partitionOffset() == 1L && p.stableOffset() == 1L && p.latestOffset() == 1L)); final MutableInteger filterCount = new MutableInteger(); - mergedFlushEx.filters().forEach(f -> filterCount.value++); + 
mergedFlushEx.fetch().filters().forEach(f -> filterCount.value++); assertEquals(2, filterCount.value); - assertNotNull(mergedFlushEx.filters() + assertNotNull(mergedFlushEx.fetch().filters() .matchFirst(f -> f.conditions() .matchFirst(c -> c.kind() == KEY.value() && "match".equals(c.key() .value() .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)))) != null)); - assertNotNull(mergedFlushEx.filters() + assertNotNull(mergedFlushEx.fetch().filters() .matchFirst(f -> f.conditions() .matchFirst(c -> c.kind() == HEADER.value() && "name".equals(c.header().name() @@ -989,6 +1042,27 @@ public void shouldGenerateMergedFlushExtensionWithStableOffset() .get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o)))) != null)); } + @Test + public void shouldGenerateMergedConsumerFlushExtension() + { + byte[] build = KafkaFunctions.flushEx() + .typeId(0x01) + .merged() + .consumer() + .partition(1, 2) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaFlushExFW flushEx = new KafkaFlushExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x1, flushEx.typeId()); + + final KafkaMergedFlushExFW mergedFlushEx = flushEx.merged(); + + assertEquals(mergedFlushEx.consumer().partition().partitionId(), 1); + assertEquals(mergedFlushEx.consumer().partition().partitionOffset(), 2); + } + @Test public void shouldMatchMergedDataExtension() throws Exception { @@ -2120,7 +2194,10 @@ public void shouldGenerateGroupFlushExtension() byte[] build = KafkaFunctions.flushEx() .typeId(0x01) .group() - .partition(0, 1L) + .leaderId("consumer-1") + .memberId("consumer-2") + .members("memberId-1", "test".getBytes()) + .members("memberId-2", "test".getBytes()) .build() .build(); @@ -2129,9 +2206,36 @@ public void shouldGenerateGroupFlushExtension() assertEquals(0x01, flushEx.typeId()); final KafkaGroupFlushExFW groupFlushEx = flushEx.group(); - final KafkaOffsetFW partition = groupFlushEx.partition(); - assertEquals(0, partition.partitionId()); - assertEquals(1L, 
partition.partitionOffset()); + final String leaderId = groupFlushEx.leaderId().asString(); + final String memberId = groupFlushEx.memberId().asString(); + assertEquals("consumer-1", leaderId); + assertEquals("consumer-2", memberId); + assertEquals(2, groupFlushEx.members().fieldCount()); + } + + @Test + public void shouldGenerateGroupFlushExtensionWithEmptyMetadata() + { + byte[] build = KafkaFunctions.flushEx() + .typeId(0x01) + .group() + .leaderId("consumer-1") + .memberId("consumer-2") + .members("memberId-1") + .members("memberId-2") + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaFlushExFW flushEx = new KafkaFlushExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x01, flushEx.typeId()); + + final KafkaGroupFlushExFW groupFlushEx = flushEx.group(); + final String leaderId = groupFlushEx.leaderId().asString(); + final String memberId = groupFlushEx.memberId().asString(); + assertEquals("consumer-1", leaderId); + assertEquals("consumer-2", memberId); + assertEquals(2, groupFlushEx.members().fieldCount()); } @Test @@ -2642,6 +2746,41 @@ public void shouldMatchMergedBeginExtensionTopic() throws Exception assertNotNull(matcher.match(byteBuf)); } + @Test + public void shouldMatchMergedBeginExtensionGroupId() throws Exception + { + BytesMatcher matcher = KafkaFunctions.matchBeginEx() + .merged() + .topic("topic") + .groupId("test") + .build() + .build(); + + ByteBuffer byteBuf = ByteBuffer.allocate(1024); + + new KafkaBeginExFW.Builder() + .wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) + .typeId(0x01) + .merged(f -> f + .topic("topic") + .groupId("test") + .partitionsItem(p -> p.partitionId(0).partitionOffset(0L)) + .filtersItem(i -> i + .conditionsItem(c -> c + .key(k -> k + .length(3) + .value(v -> v.set("key".getBytes(UTF_8))))) + .conditionsItem(c -> c + .header(h -> h + .nameLen(4) + .name(n -> n.set("name".getBytes(UTF_8))) + .valueLen(5) + .value(v -> v.set("value".getBytes(UTF_8))))))) + .build(); + + 
assertNotNull(matcher.match(byteBuf)); + } + @Test public void shouldMatchMergedBeginExtensionConsumerId() throws Exception { @@ -3869,6 +4008,30 @@ public void shouldGenerateMergedBeginExtensionWithHeadersFilter() @Test public void shouldGenerateGroupBeginExtension() + { + byte[] build = KafkaFunctions.beginEx() + .typeId(0x01) + .group() + .groupId("test") + .protocol("roundrobin") + .timeout(10) + .metadata("test".getBytes()) + .build() + .build(); + + DirectBuffer buffer = new UnsafeBuffer(build); + KafkaBeginExFW beginEx = new KafkaBeginExFW().wrap(buffer, 0, buffer.capacity()); + assertEquals(0x01, beginEx.typeId()); + assertEquals(KafkaApi.GROUP.value(), beginEx.kind()); + + final KafkaGroupBeginExFW groupBeginEx = beginEx.group(); + assertEquals("test", groupBeginEx.groupId().asString()); + assertEquals("roundrobin", groupBeginEx.protocol().asString()); + assertEquals(10, groupBeginEx.timeout()); + } + + @Test + public void shouldGenerateGroupBeginWithEmptyMetadataExtension() { byte[] build = KafkaFunctions.beginEx() .typeId(0x01) @@ -3895,11 +4058,13 @@ public void shouldGenerateConsumerBeginExtension() { byte[] build = KafkaFunctions.beginEx() .typeId(0x01) - .consumer() - .groupId("test") - .topic("topic") - .partition(1) - .build() + .consumer() + .groupId("test") + .consumerId("consumer-1") + .timeout(10000) + .topic("topic") + .partition(0) + .build() .build(); DirectBuffer buffer = new UnsafeBuffer(build); @@ -3975,52 +4140,33 @@ public void shouldMatchGroupBeginExtension() throws Exception .group(f -> f .groupId("test") .protocol("roundrobin") - .timeout(10)) + .timeout(10) + .metadataLen("test".length()) + .metadata(m -> m.set("test".getBytes()))) .build(); assertNotNull(matcher.match(byteBuf)); } - @Test - public void shouldGenerateGroupDataExtension() - { - byte[] build = KafkaFunctions.dataEx() - .typeId(0x01) - .group() - .leaderId("test1") - .memberId("test2") - .members(2) - .build() - .build(); - - DirectBuffer buffer = new 
UnsafeBuffer(build); - KafkaDataExFW dataEx = new KafkaDataExFW().wrap(buffer, 0, buffer.capacity()); - assertEquals(0x01, dataEx.typeId()); - assertEquals(KafkaApi.GROUP.value(), dataEx.kind()); - - final KafkaGroupDataExFW groupDataEx = dataEx.group(); - assertEquals("test1", groupDataEx.leaderId().asString()); - assertEquals("test2", groupDataEx.memberId().asString()); - assertTrue(groupDataEx.members() == 2); - } - @Test public void shouldGenerateConsumerDataExtension() { byte[] build = KafkaFunctions.dataEx() - .typeId(0x01) + .typeId(0x03) .consumer() .partition(0) + .assignment("localhost:9092", 0) .build() .build(); DirectBuffer buffer = new UnsafeBuffer(build); KafkaDataExFW dataEx = new KafkaDataExFW().wrap(buffer, 0, buffer.capacity()); - assertEquals(0x01, dataEx.typeId()); + assertEquals(0x03, dataEx.typeId()); assertEquals(KafkaApi.CONSUMER.value(), dataEx.kind()); final KafkaConsumerDataExFW consumerDataEx = dataEx.consumer(); assertTrue(consumerDataEx.partitions().fieldCount() == 1); + assertTrue(consumerDataEx.assignments().fieldCount() == 1); } @Test @@ -4029,7 +4175,7 @@ public void shouldGenerateOffsetFetchDataExtension() byte[] build = KafkaFunctions.dataEx() .typeId(0x01) .offsetFetch() - .topic("test", 0, 1L, 2L) + .topic("test", 0, 1L) .build() .build(); @@ -4041,8 +4187,8 @@ public void shouldGenerateOffsetFetchDataExtension() final KafkaOffsetFetchDataExFW offsetFetchDataEx = dataEx.offsetFetch(); KafkaOffsetFW offset = offsetFetchDataEx.topic().offsets().matchFirst(o -> o.partitionId() == 0); assertEquals("test", offsetFetchDataEx.topic().topic().asString()); - assertEquals(1L, offset.stableOffset()); - assertEquals(2L, offset.latestOffset()); + assertEquals(0, offset.partitionId()); + assertEquals(1L, offset.partitionOffset()); } @Test @@ -4066,32 +4212,6 @@ public void shouldGenerateOffsetCommitDataExtension() assertEquals(1L, offsetCommitDataEx.partitionOffset()); } - @Test - public void shouldMatchGroupDataExtension() throws Exception 
- { - BytesMatcher matcher = KafkaFunctions.matchDataEx() - .typeId(0x01) - .group() - .leaderId("test1") - .memberId("test2") - .members(2) - .build() - .build(); - - ByteBuffer byteBuf = ByteBuffer.allocate(1024); - - new KafkaDataExFW.Builder() - .wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) - .typeId(0x01) - .group(f -> f - .leaderId("test1") - .memberId("test2") - .members(2)) - .build(); - - assertNotNull(matcher.match(byteBuf)); - } - @Test public void shouldInvokeLength() throws Exception { @@ -4352,23 +4472,25 @@ public void shouldMatchMergedFlushExtension() throws Exception BytesMatcher matcher = KafkaFunctions.matchFlushEx() .typeId(0x01) .merged() - .partition(1, 2) - .progress(0, 1L) - .capabilities("FETCH_ONLY") - .key("key") - .build() + .fetch() + .partition(1, 2) + .progress(0, 1L) + .capabilities("FETCH_ONLY") + .key("key") + .build() .build(); ByteBuffer byteBuf = ByteBuffer.allocate(1024); new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) - .merged(f -> f.partition(p -> p.partitionId(1).partitionOffset(2)) + .merged(f -> f + .fetch(m -> m.partition(p -> p.partitionId(1).partitionOffset(2)) .progressItem(p -> p .partitionId(0) .partitionOffset(1L)) - .capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)) - .key(k -> k.length(3).value(v -> v.set("key".getBytes(UTF_8))))) + .capabilities(c -> c.set(KafkaCapabilities.FETCH_ONLY)) + .key(k -> k.length(3).value(v -> v.set("key".getBytes(UTF_8)))))) .build(); assertNotNull(matcher.match(byteBuf)); @@ -4380,6 +4502,7 @@ public void shouldMatchMergedFlushExtensionWithLatestOffset() throws Exception BytesMatcher matcher = KafkaFunctions.matchFlushEx() .typeId(0x01) .merged() + .fetch() .partition(0, 1L, 1L) .progress(0, 1L, 1L) .build() @@ -4390,12 +4513,12 @@ public void shouldMatchMergedFlushExtensionWithLatestOffset() throws Exception new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) - .merged(f 
-> - f.partition(p -> p.partitionId(0).partitionOffset(1L).latestOffset(1L)) + .merged(f -> f + .fetch(m -> m.partition(p -> p.partitionId(0).partitionOffset(1L).latestOffset(1L)) .progressItem(p -> p .partitionId(0) .partitionOffset(1L) - .latestOffset(1L))) + .latestOffset(1L)))) .build(); assertNotNull(matcher.match(byteBuf)); @@ -4412,9 +4535,11 @@ public void shouldMatchMergedFlushExtensionTypeId() throws Exception new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) - .merged(f -> f.progressItem(p -> p - .partitionId(0) - .partitionOffset(1L))) + .merged(f -> f + .fetch(m -> + m.progressItem(p -> p + .partitionId(0) + .partitionOffset(1L)))) .build(); assertNotNull(matcher.match(byteBuf)); @@ -4425,6 +4550,7 @@ public void shouldMatchMergedFlushExtensionProgress() throws Exception { BytesMatcher matcher = KafkaFunctions.matchFlushEx() .merged() + .fetch() .progress(0, 1L, 1L, 1L) .build() .build(); @@ -4433,11 +4559,12 @@ public void shouldMatchMergedFlushExtensionProgress() throws Exception new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) - .merged(f -> f.progressItem(p -> p - .partitionId(0) - .partitionOffset(1L) - .stableOffset(1L) - .latestOffset(1L))) + .merged(f -> f + .fetch(m -> m.progressItem(p -> p + .partitionId(0) + .partitionOffset(1L) + .stableOffset(1L) + .latestOffset(1L)))) .build(); assertNotNull(matcher.match(byteBuf)); @@ -4447,12 +4574,14 @@ public void shouldMatchMergedFlushExtensionProgress() throws Exception public void shouldMatchMergedFlushExtensionFilters() throws Exception { BytesMatcher matcher = KafkaFunctions.matchFlushEx() + .typeId(0x01) .merged() - .filter() - .key("key") - .header("name", "value") + .fetch() + .filter() + .key("key") + .header("name", "value") + .build() .build() - .build() .build(); ByteBuffer byteBuf = ByteBuffer.allocate(1024); @@ -4461,7 +4590,7 @@ public void shouldMatchMergedFlushExtensionFilters() throws 
Exception .wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) .merged(f -> f - .filtersItem(i -> i + .fetch(m -> m.filtersItem(i -> i .conditionsItem(c -> c .key(k -> k .length(3) @@ -4472,6 +4601,7 @@ public void shouldMatchMergedFlushExtensionFilters() throws Exception .name(n -> n.set("name".getBytes(UTF_8))) .valueLen(5) .value(v -> v.set("value".getBytes(UTF_8))))))) + ) .build(); assertNotNull(matcher.match(byteBuf)); @@ -4488,9 +4618,10 @@ public void shouldNotMatchMergedFlushExtensionTypeId() throws Exception new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) - .merged(f -> f.progressItem(p -> p - .partitionId(0) - .partitionOffset(1L))) + .merged(f -> f + .fetch(m -> m.progressItem(p -> p + .partitionId(0) + .partitionOffset(1L)))) .build(); matcher.match(byteBuf); @@ -4502,17 +4633,19 @@ public void shouldNotMatchMergedFlushExtensionProgress() throws Exception BytesMatcher matcher = KafkaFunctions.matchFlushEx() .typeId(0x01) .merged() - .progress(0, 2L) - .build() + .fetch() + .progress(0, 2L) + .build() .build(); ByteBuffer byteBuf = ByteBuffer.allocate(1024); new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) .typeId(0x01) - .merged(f -> f.progressItem(p -> p + .merged(f -> f + .fetch(m -> m.progressItem(p -> p .partitionId(0) - .partitionOffset(1L))) + .partitionOffset(1L)))) .build(); matcher.match(byteBuf); @@ -4747,6 +4880,54 @@ public void shouldNotMatchFetchFlushExtensionWithStableOffset() throws Exception matcher.match(byteBuf); } + @Test + public void shouldMatchGroupFlushExtensionMembers() throws Exception + { + BytesMatcher matcher = KafkaFunctions.matchFlushEx() + .typeId(0x01) + .group() + .leaderId("memberId-1") + .memberId("memberId-2") + .members("memberId-1") + .build() + .build(); + + ByteBuffer byteBuf = ByteBuffer.allocate(1024); + + new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) + .typeId(0x01) + 
.group(f -> f.leaderId("memberId-1").memberId("memberId-2"). + members(m -> m.item(i -> i.id("memberId-1")))) + .build(); + + assertNotNull(matcher.match(byteBuf)); + } + + @Test + public void shouldMatchGroupFlushExtensionMembersMetadata() throws Exception + { + BytesMatcher matcher = KafkaFunctions.matchFlushEx() + .typeId(0x01) + .group() + .leaderId("memberId-1") + .memberId("memberId-2") + .members("memberId-1", "test") + .build() + .build(); + + ByteBuffer byteBuf = ByteBuffer.allocate(1024); + + new KafkaFlushExFW.Builder().wrap(new UnsafeBuffer(byteBuf), 0, byteBuf.capacity()) + .typeId(0x01) + .group(f -> f.leaderId("memberId-1").memberId("memberId-2"). + members(m -> m.item(i -> i.id("memberId-1") + .metadataLen("test".length()).metadata(o -> o.set("test".getBytes()))))) + .build(); + + assertNotNull(matcher.match(byteBuf)); + } + + @Test(expected = Exception.class) public void shouldNotMatchFetchFlushExtensionWithLatestOffset() throws Exception { diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ConsumerIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ConsumerIT.java new file mode 100644 index 0000000000..8cf378a166 --- /dev/null +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ConsumerIT.java @@ -0,0 +1,47 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.specs.binding.kafka.streams.application; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +public class ConsumerIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/consumer"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + @Rule + public final TestRule chain = outerRule(k3po).around(timeout); + + @Test + @Specification({ + "${app}/partition.assignment/client", + "${app}/partition.assignment/server"}) + public void shouldAssignPartition() throws Exception + { + k3po.finish(); + } +} diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java index ae9738a9b4..416825c4db 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java @@ -26,7 +26,6 @@ import org.kaazing.k3po.junit.annotation.Specification; import org.kaazing.k3po.junit.rules.K3poRule; - public class GroupIT { private final K3poRule k3po = new K3poRule() @@ -90,4 +89,22 @@ public void shouldIgnoreHeartbeatBeforeHandshakeComplete() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${app}/rebalance.sync.group/client", + "${app}/rebalance.sync.group/server"}) + public 
void shouldHandleRebalanceSyncGroup() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/partition.assignment/client", + "${app}/partition.assignment/server"}) + public void shouldAssignGroupPartition() throws Exception + { + k3po.finish(); + } } diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java index a6fd7859e0..e019773402 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java @@ -647,4 +647,22 @@ public void shouldFetchMergedMessagesWithIsolationReadCommitted() throws Excepti { k3po.finish(); } + + @Test + @Specification({ + "${app}/merged.group.fetch.message.value/client", + "${app}/merged.group.fetch.message.value/server"}) + public void shouldFetchGroupMergedMessage() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/unmerged.group.fetch.message.value/client", + "${app}/unmerged.group.fetch.message.value/server"}) + public void shouldFetchGroupUnmergedMessage() throws Exception + { + k3po.finish(); + } } diff --git a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/client.rpt b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/client.rpt index a9b8af7a00..7d6a3b1061 100644 --- a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/client.rpt +++ b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/client.rpt @@ 
-57,6 +57,7 @@ read "Hello, again" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/server.rpt b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/server.rpt index 085878d37b..f2e0624691 100644 --- a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/server.rpt +++ b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.etag/server.rpt @@ -63,6 +63,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/client.rpt b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/client.rpt index aceeba26e0..a26e271b43 100644 --- a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/client.rpt +++ b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/client.rpt @@ -53,6 +53,7 @@ read "Hello, again" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/server.rpt 
b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/server.rpt index 999c278a0c..1bf51d9003 100644 --- a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/server.rpt +++ b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages.with.null.key/server.rpt @@ -59,6 +59,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/client.rpt b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/client.rpt index 7c455f08a9..3cf2256370 100644 --- a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/client.rpt +++ b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/client.rpt @@ -55,6 +55,7 @@ read "Hello, again" read advised zilla:flush ${kafka:matchFlushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} diff --git a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/server.rpt b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/server.rpt index 889af16b13..ddfd07e530 100644 --- a/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/server.rpt +++ 
b/specs/binding-sse-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/sse/kafka/streams/kafka/server.sent.messages/server.rpt @@ -61,6 +61,7 @@ write flush write advise zilla:flush ${kafka:flushEx() .typeId(zilla:id("kafka")) .merged() + .fetch() .progress(0, 2, 2, 2) .build() .build()} From 3a4deb5f828ddbe9f92ed9c43cba2e68993faeed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 13:50:25 -0700 Subject: [PATCH 078/115] Bump actions/checkout from 3 to 4 (#393) Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- .github/workflows/codeql.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1f134fc0d8..d87d467656 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,7 +17,7 @@ jobs: steps: - name: Checkout GitHub sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup JDK ${{ matrix.java }} uses: actions/setup-java@v3 with: diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index d0b61a6cd9..d790178a93 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL From 9283d31f1535bb556690f638d856a1d1adcde328 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 7 Sep 2023 08:00:07 -0700 Subject: [PATCH 079/115] Support build after local docker zpm install (#396) --- cloud/docker-image/pom.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker-image/pom.xml b/cloud/docker-image/pom.xml index 5db823424a..94e06c2dc7 100644 --- a/cloud/docker-image/pom.xml +++ b/cloud/docker-image/pom.xml @@ -203,6 +203,7 @@ src/main/docker/*/zpmw src/main/docker/*/zilla src/main/docker/*/zilla.properties + src/main/docker/*/.zilla/** src/main/docker/*/zpm.json.template src/main/docker/*/zpm.json src/main/docker/*/.zpm/** From 08626dbb35fa9cb713251ac90bbbc9f200a5fd10 Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Mon, 11 Sep 2023 09:04:12 -0700 Subject: [PATCH 080/115] Support consumer protocol (#400) --- runtime/binding-kafka/pom.xml | 2 +- ...a => KafkaCacheClientConsumerFactory.java} | 34 ++-- .../stream/KafkaCacheClientFactory.java | 3 +- ...a => KafkaCacheServerConsumerFactory.java} | 30 ++-- .../stream/KafkaCacheServerFactory.java | 3 +- .../internal/stream/KafkaClientFactory.java | 4 - .../stream/KafkaClientGroupFactory.java | 157 +++++++++++++++--- .../binding-kafka/src/main/zilla/protocol.idl | 31 ++++ .../internal/stream/CacheConsumerIT.java | 3 +- .../internal/stream/ClientConsumerIT.java | 64 ------- .../client.rpt | 23 +-- .../server.rpt | 30 ++-- .../coordinator.not.available/client.rpt | 22 ++- .../coordinator.not.available/server.rpt | 26 +-- .../client.rpt | 57 ++++--- .../server.rpt | 28 ++-- .../client.rpt | 26 +-- .../server.rpt | 24 ++- .../client.rpt | 34 ++-- .../server.rpt | 42 +++-- .../client.rpt | 24 ++- .../server.rpt | 28 ++-- .../rebalance.protocol.highlander/client.rpt | 41 +++-- .../rebalance.protocol.highlander/server.rpt | 43 +++-- .../rebalance.protocol.unknown/client.rpt | 18 +- .../rebalance.protocol.unknown/server.rpt | 19 ++- .../rebalance.sync.group/client.rpt | 
30 ++-- .../rebalance.sync.group/server.rpt | 38 +++-- .../leader/client.rpt | 19 ++- .../leader/server.rpt | 22 ++- 30 files changed, 558 insertions(+), 367 deletions(-) rename runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/{KafkaCacheConsumerFactory.java => KafkaCacheClientConsumerFactory.java} (97%) rename runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/{KafkaClientConsumerFactory.java => KafkaCacheServerConsumerFactory.java} (97%) delete mode 100644 runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientConsumerIT.java diff --git a/runtime/binding-kafka/pom.xml b/runtime/binding-kafka/pom.xml index 6f1f4a3945..c12e6cc57d 100644 --- a/runtime/binding-kafka/pom.xml +++ b/runtime/binding-kafka/pom.xml @@ -26,7 +26,7 @@ 11 11 - 0.80 + 0.79 3 diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java similarity index 97% rename from runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheConsumerFactory.java rename to runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java index 6396dfe1af..7bf4d689c2 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheConsumerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java @@ -55,7 +55,7 @@ import io.aklivity.zilla.runtime.engine.buffer.BufferPool; import io.aklivity.zilla.runtime.engine.concurrent.Signaler; -public final class KafkaCacheConsumerFactory implements BindingHandler +public final class 
KafkaCacheClientConsumerFactory implements BindingHandler { private static final Consumer EMPTY_EXTENSION = ex -> {}; @@ -93,9 +93,9 @@ public final class KafkaCacheConsumerFactory implements BindingHandler private final LongFunction supplyLocalName; private final LongFunction supplyBinding; - private final Object2ObjectHashMap clientConsumerFansByGroupId; + private final Object2ObjectHashMap clientConsumerFansByGroupId; - public KafkaCacheConsumerFactory( + public KafkaCacheClientConsumerFactory( KafkaConfiguration config, EngineContext context, LongFunction supplyBinding) @@ -154,18 +154,18 @@ public MessageConsumer newStream( { final long resolvedId = resolved.id; - KafkaCacheConsumerFanout fanout = clientConsumerFansByGroupId.get(groupId); + KafkaCacheClientConsumerFanout fanout = clientConsumerFansByGroupId.get(groupId); if (fanout == null) { - KafkaCacheConsumerFanout newFanout = - new KafkaCacheConsumerFanout(routedId, resolvedId, authorization, groupId, + KafkaCacheClientConsumerFanout newFanout = + new KafkaCacheClientConsumerFanout(routedId, resolvedId, authorization, groupId, topic, consumerId, partitions, timeout); fanout = newFanout; clientConsumerFansByGroupId.put(groupId, fanout); } - newStream = new KafkaCacheConsumerStream( + newStream = new KafkaCacheClientConsumerStream( fanout, sender, originId, @@ -380,7 +380,7 @@ private void doReset( sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); } - final class KafkaCacheConsumerFanout + final class KafkaCacheClientConsumerFanout { private final long originId; private final long routedId; @@ -389,7 +389,7 @@ final class KafkaCacheConsumerFanout private final String topic; private final String consumerId; private final int timeout; - private final List members; + private final List members; private final IntHashSet partitions; private final IntHashSet assignedPartitions; private final Object2ObjectHashMap assignments; @@ -409,7 +409,7 @@ final class KafkaCacheConsumerFanout 
private int replyMax; - private KafkaCacheConsumerFanout( + private KafkaCacheClientConsumerFanout( long originId, long routedId, long authorization, @@ -434,7 +434,7 @@ private KafkaCacheConsumerFanout( private void onConsumerFanoutMemberOpening( long traceId, - KafkaCacheConsumerStream member) + KafkaCacheClientConsumerStream member) { members.add(member); @@ -455,7 +455,7 @@ private void onConsumerFanoutMemberOpening( private void onConsumerFanoutMemberOpened( long traceId, - KafkaCacheConsumerStream member) + KafkaCacheClientConsumerStream member) { if (!assignedPartitions.isEmpty()) { @@ -476,7 +476,7 @@ private void onConsumerFanoutMemberOpened( private void onConsumerFanoutMemberClosed( long traceId, - KafkaCacheConsumerStream member) + KafkaCacheClientConsumerStream member) { members.remove(member); @@ -746,9 +746,9 @@ private void doConsumerFanoutReplyWindow( } } - private final class KafkaCacheConsumerStream + private final class KafkaCacheClientConsumerStream { - private final KafkaCacheConsumerFanout group; + private final KafkaCacheClientConsumerFanout group; private final MessageConsumer sender; private final long originId; private final long routedId; @@ -770,8 +770,8 @@ private final class KafkaCacheConsumerStream private long replyBudgetId; - KafkaCacheConsumerStream( - KafkaCacheConsumerFanout group, + KafkaCacheClientConsumerStream( + KafkaCacheClientConsumerFanout group, MessageConsumer sender, long originId, long routedId, diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java index 8c547bae7e..adc07c8e6e 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java +++ 
b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientFactory.java @@ -66,7 +66,8 @@ public KafkaCacheClientFactory( final KafkaCacheGroupFactory cacheGroupFactory = new KafkaCacheGroupFactory(config, context, bindings::get); - final KafkaCacheConsumerFactory consumerGroupFactory = new KafkaCacheConsumerFactory(config, context, bindings::get); + final KafkaCacheClientConsumerFactory consumerGroupFactory = + new KafkaCacheClientConsumerFactory(config, context, bindings::get); final KafkaCacheOffsetFetchFactory cacheOffsetFetchFactory = new KafkaCacheOffsetFetchFactory(config, context, bindings::get); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java similarity index 97% rename from runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConsumerFactory.java rename to runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java index 7e81172693..bb82dff173 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConsumerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java @@ -56,7 +56,7 @@ import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; import io.aklivity.zilla.runtime.engine.buffer.BufferPool; -public final class KafkaClientConsumerFactory implements BindingHandler +public final class KafkaCacheServerConsumerFactory implements BindingHandler { private static final Consumer EMPTY_EXTENSION = ex -> {}; private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer(); @@ -100,9 +100,9 @@ public final class 
KafkaClientConsumerFactory implements BindingHandler private final LongUnaryOperator supplyInitialId; private final LongUnaryOperator supplyReplyId; private final LongFunction supplyBinding; - private final Object2ObjectHashMap clientConsumerFansByGroupId; + private final Object2ObjectHashMap clientConsumerFansByGroupId; - public KafkaClientConsumerFactory( + public KafkaCacheServerConsumerFactory( KafkaConfiguration config, EngineContext context, LongFunction supplyBinding) @@ -158,17 +158,17 @@ public MessageConsumer newStream( { final long resolvedId = resolved.id; - KafkaClientConsumerFanout fanout = clientConsumerFansByGroupId.get(groupId); + KafkaCacheServerConsumerFanout fanout = clientConsumerFansByGroupId.get(groupId); if (fanout == null) { - KafkaClientConsumerFanout newFanout = - new KafkaClientConsumerFanout(routedId, resolvedId, authorization, consumerId, groupId, timeout); + KafkaCacheServerConsumerFanout newFanout = + new KafkaCacheServerConsumerFanout(routedId, resolvedId, authorization, consumerId, groupId, timeout); fanout = newFanout; clientConsumerFansByGroupId.put(groupId, fanout); } - newStream = new KafkaClientConsumerStream( + newStream = new KafkaCacheServerConsumerStream( fanout, sender, originId, @@ -517,7 +517,7 @@ private void doReset( sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); } - final class KafkaClientConsumerFanout + final class KafkaCacheServerConsumerFanout { private final String consumerId; private final String groupId; @@ -525,7 +525,7 @@ final class KafkaClientConsumerFanout private final long routedId; private final long authorization; private final int timeout; - private final List streams; + private final List streams; private final Object2ObjectHashMap members; private final Object2ObjectHashMap partitionsByTopic; private final Object2ObjectHashMap> assignment; @@ -549,7 +549,7 @@ final class KafkaClientConsumerFanout private String memberId; - private KafkaClientConsumerFanout( + private 
KafkaCacheServerConsumerFanout( long originId, long routedId, long authorization, @@ -834,7 +834,7 @@ private void onConsumerReplyData( topicAssignments.forEach(ta -> { - KafkaClientConsumerStream stream = + KafkaCacheServerConsumerStream stream = streams.stream().filter(s -> s.topic.equals(ta.topic().asString())).findFirst().get(); stream.doConsumerReplyData(traceId, flags, replyPad, EMPTY_OCTETS, @@ -985,9 +985,9 @@ private void doMemberAssigment( } } - final class KafkaClientConsumerStream + final class KafkaCacheServerConsumerStream { - private final KafkaClientConsumerFanout fanout; + private final KafkaCacheServerConsumerFanout fanout; private final MessageConsumer sender; private final String topic; private final List partitions; @@ -1012,8 +1012,8 @@ final class KafkaClientConsumerStream private long replyBud; private int replyCap; - KafkaClientConsumerStream( - KafkaClientConsumerFanout fanout, + KafkaCacheServerConsumerStream( + KafkaCacheServerConsumerFanout fanout, MessageConsumer sender, long originId, long routedId, diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java index 72c431cf91..1d28951bc4 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerFactory.java @@ -69,7 +69,8 @@ public KafkaCacheServerFactory( final KafkaCacheGroupFactory cacheGroupFactory = new KafkaCacheGroupFactory(config, context, bindings::get); - final KafkaCacheConsumerFactory consumerGroupFactory = new KafkaCacheConsumerFactory(config, context, bindings::get); + final KafkaCacheServerConsumerFactory consumerGroupFactory = + new KafkaCacheServerConsumerFactory(config, context, 
bindings::get); final KafkaCacheOffsetFetchFactory cacheOffsetFetchFactory = new KafkaCacheOffsetFetchFactory(config, context, bindings::get); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java index 289ebff849..99f87b14b0 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java @@ -61,9 +61,6 @@ public KafkaClientFactory( final KafkaClientGroupFactory clientGroupFactory = new KafkaClientGroupFactory( config, context, bindings::get, accountant::supplyDebitor); - final KafkaClientConsumerFactory clientConsumerFactory = new KafkaClientConsumerFactory( - config, context, bindings::get); - final KafkaClientFetchFactory clientFetchFactory = new KafkaClientFetchFactory( config, context, bindings::get, accountant::supplyDebitor, supplyClientRoute); @@ -80,7 +77,6 @@ public KafkaClientFactory( factories.put(KafkaBeginExFW.KIND_META, clientMetaFactory); factories.put(KafkaBeginExFW.KIND_DESCRIBE, clientDescribeFactory); factories.put(KafkaBeginExFW.KIND_GROUP, clientGroupFactory); - factories.put(KafkaBeginExFW.KIND_CONSUMER, clientConsumerFactory); factories.put(KafkaBeginExFW.KIND_FETCH, clientFetchFactory); factories.put(KafkaBeginExFW.KIND_PRODUCE, clientProduceFactory); factories.put(KafkaBeginExFW.KIND_OFFSET_FETCH, clientOffsetFetchFactory); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index 8cfd2ae67c..d1693edccc 100644 --- 
a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -55,6 +55,9 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.DescribeConfigsResponseFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.ResourceRequestFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.ResourceResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.consumer.ConsumerMetadataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.consumer.ConsumerMetadataTopicFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.consumer.ConsumerUserdataFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.AssignmentFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.FindCoordinatorRequestFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.FindCoordinatorResponseFW; @@ -82,6 +85,7 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupMemberFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupMemberMetadataFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ProxyBeginExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; @@ -96,6 +100,7 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker implements BindingHandler { + private static final short METADATA_LOWEST_VERSION = 0; private static final short ERROR_EXISTS = 
-1; private static final short ERROR_NONE = 0; @@ -107,7 +112,7 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private static final short SIGNAL_NEXT_REQUEST = 1; private static final short DESCRIBE_CONFIGS_API_KEY = 32; private static final short DESCRIBE_CONFIGS_API_VERSION = 0; - private static final byte RESOURCE_TYPE_BROKER = 1; + private static final byte RESOURCE_TYPE_BROKER = 4; private static final short FIND_COORDINATOR_API_KEY = 10; private static final short FIND_COORDINATOR_API_VERSION = 1; private static final short JOIN_GROUP_API_KEY = 11; @@ -164,6 +169,9 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final HeartbeatRequestFW.Builder heartbeatRequestRW = new HeartbeatRequestFW.Builder(); private final LeaveGroupRequestFW.Builder leaveGroupRequestRW = new LeaveGroupRequestFW.Builder(); private final LeaveMemberFW.Builder leaveMemberRW = new LeaveMemberFW.Builder(); + private final ConsumerMetadataFW.Builder groupMemberMetadataRW = new ConsumerMetadataFW.Builder(); + private final ConsumerMetadataTopicFW.Builder groupMetadataTopicRW = new ConsumerMetadataTopicFW.Builder(); + private final ConsumerUserdataFW.Builder groupUserdataRW = new ConsumerUserdataFW.Builder(); private final ResourceResponseFW resourceResponseRO = new ResourceResponseFW(); private final ConfigResponseFW configResponseRO = new ConfigResponseFW(); @@ -178,6 +186,11 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final LeaveMemberFW leaveMemberRO = new LeaveMemberFW(); private final Array32FW memberAssignmentRO = new Array32FW<>(new MemberAssignmentFW()); + private final ConsumerMetadataFW groupMemberMetadataRO = new ConsumerMetadataFW(); + private final ConsumerMetadataTopicFW groupMetadataTopicRO = new ConsumerMetadataTopicFW(); + private final ConsumerUserdataFW groupUserdataRO = new ConsumerUserdataFW(); + + private final KafkaGroupMemberMetadataFW 
kafkaMemberMetadataRO = new KafkaGroupMemberMetadataFW(); private final KafkaDescribeClientDecoder decodeSaslHandshakeResponse = this::decodeSaslHandshakeResponse; private final KafkaDescribeClientDecoder decodeSaslHandshake = this::decodeSaslHandshake; @@ -516,7 +529,7 @@ private void doFlush( long authorization, long budgetId, int reserved, - Flyweight extension) + Consumer extension) { final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) .originId(originId) @@ -529,7 +542,7 @@ private void doFlush( .authorization(authorization) .budgetId(budgetId) .reserved(reserved) - .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .extension(extension) .build(); receiver.accept(flush.typeId(), flush.buffer(), flush.offset(), flush.sizeof()); @@ -1333,7 +1346,7 @@ private void onApplicationBegin( if (metadataSize > 0) { - metadataBuffer.putBytes(0, metadata.buffer(), metadata.offset(), kafkaGroupBeginEx.metadataLen()); + metadataBuffer.putBytes(0, metadata.value(), 0, metadataSize); topicMetadataLimit += metadataSize; } @@ -1461,7 +1474,7 @@ private void doApplicationData( { doData(application, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization, replyBudgetId, reserved, - payload.value(), payload.offset(), payload.sizeof(), EMPTY_EXTENSION); + payload.value(), 0, payload.sizeof(), EMPTY_EXTENSION); } else { @@ -1477,7 +1490,7 @@ private void doApplicationData( private void doApplicationFlush( long traceId, long authorization, - Flyweight extension) + Consumer extension) { if (!KafkaState.replyClosed(state)) { @@ -3381,10 +3394,13 @@ private void doEncodeJoinGroupRequest( encodeProgress = joinGroupRequest.limit(); + final int metadataLimit = delegate.topicMetadataLimit > 0 ? 
doGenerateMembersMetadata() : + doGenerateEmptyMetadata(); + final ProtocolMetadataFW protocolMetadata = protocolMetadataRW.wrap(encodeBuffer, encodeProgress, encodeLimit) .name(delegate.protocol) - .metadata(delegate.metadataBuffer, 0, delegate.topicMetadataLimit) + .metadata(extBuffer, 0, metadataLimit) .build(); encodeProgress = protocolMetadata.limit(); @@ -3407,6 +3423,75 @@ private void doEncodeJoinGroupRequest( delegate.doApplicationBeginIfNecessary(traceId, authorization); } + private int doGenerateMembersMetadata() + { + final MutableDirectBuffer encodeBuffer = extBuffer; + final int encodeOffset = 0; + final int encodeLimit = encodeBuffer.capacity(); + + final MutableInteger encodeProgress = new MutableInteger(encodeOffset); + + KafkaGroupMemberMetadataFW memberMetadata = kafkaMemberMetadataRO + .wrap(delegate.metadataBuffer, 0, delegate.topicMetadataLimit); + + ConsumerMetadataFW metadata = groupMemberMetadataRW + .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .version(METADATA_LOWEST_VERSION) + .metadataTopicCount(memberMetadata.topics().fieldCount()) + .build(); + + encodeProgress.set(metadata.limit()); + + memberMetadata.topics().forEach(t -> + { + ConsumerMetadataTopicFW metadataTopic = groupMetadataTopicRW + .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .name(t.topic()) + .build(); + encodeProgress.set(metadataTopic.limit()); + }); + + memberMetadata.topics().forEach(t -> + { + final ConsumerUserdataFW userdata = groupUserdataRW + .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .userdata(delegate.metadataBuffer, 0, delegate.topicMetadataLimit) + .ownedPartitions(0) + .build(); + + encodeProgress.set(userdata.limit()); + }); + + return encodeProgress.get(); + } + + private int doGenerateEmptyMetadata() + { + final MutableDirectBuffer encodeBuffer = extBuffer; + final int encodeOffset = 0; + final int encodeLimit = encodeBuffer.capacity(); + + final MutableInteger encodeProgress = new MutableInteger(encodeOffset); + + 
ConsumerMetadataFW metadata = groupMemberMetadataRW + .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .version(METADATA_LOWEST_VERSION) + .metadataTopicCount(0) + .build(); + + encodeProgress.set(metadata.limit()); + + final ConsumerUserdataFW userdata = groupUserdataRW + .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .userdata(delegate.metadataBuffer, 0, delegate.topicMetadataLimit) + .ownedPartitions(0) + .build(); + + encodeProgress.set(userdata.limit()); + + return encodeProgress.get(); + } + private void doEncodeSyncGroupRequest( long traceId, long budgetId) @@ -3451,7 +3536,7 @@ private void doEncodeSyncGroupRequest( final AssignmentFW groupAssignment = assignmentRW.wrap(encodeBuffer, encodeProgress.get(), encodeLimit) .memberId(a.memberId()) - .value(topicPartitions.buffer(), topicPartitions.offset(), topicPartitions.length()) + .value(topicPartitions.buffer(), topicPartitions.offset(), topicPartitions.sizeof()) .build(); encodeProgress.set(groupAssignment.limit()); @@ -3464,7 +3549,7 @@ private void doEncodeSyncGroupRequest( final AssignmentFW groupAssignment = assignmentRW.wrap(encodeBuffer, encodeProgress.get(), encodeLimit) .memberId(m.memberId) - .value(m.metadata) + .value(EMPTY_OCTETS) .build(); encodeProgress.set(groupAssignment.limit()); @@ -3847,25 +3932,43 @@ private void onJoinGroupResponse( delegate.groupMembership.memberIds.put(delegate.groupId, memberId); - final KafkaFlushExFW kafkaFlushEx = - kafkaFlushExRW.wrap(writeBuffer, FlushFW.FIELD_OFFSET_EXTENSION, writeBuffer.capacity()) - .typeId(kafkaTypeId) - .group(g -> g.leaderId(leaderId) - .memberId(memberId) - .members(gm -> members.forEach(m -> - gm.item(i -> - { - KafkaGroupMemberFW.Builder member = i.id(m.memberId); - if (m.metadata.sizeof() > 0) - { - member.metadataLen(m.metadata.sizeof()) - .metadata(m.metadata) - .build(); - } - })))) - .build(); + delegate.doApplicationFlush(traceId, authorization, + ex -> ex.set((b, o, l) -> kafkaFlushExRW.wrap(b, o, l) + 
.typeId(kafkaTypeId) + .group(g -> g.leaderId(leaderId) + .memberId(memberId) + .members(gm -> members.forEach(m -> + { + OctetsFW metadata = m.metadata; + DirectBuffer buffer = metadata.value(); + final int limit = metadata.sizeof(); - delegate.doApplicationFlush(traceId, authorization, kafkaFlushEx); + int progress = 0; + + ConsumerMetadataFW newGroupMetadata = groupMemberMetadataRO + .wrap(buffer, 0, metadata.sizeof()); + progress = newGroupMetadata.limit(); + + for (int i = 0; i < newGroupMetadata.metadataTopicCount(); i++) + { + ConsumerMetadataTopicFW topic = groupMetadataTopicRO.wrap(buffer, progress, limit); + progress = topic.limit(); + } + + ConsumerUserdataFW userdata = groupUserdataRO.wrap(buffer, progress, limit); + + gm.item(i -> + { + KafkaGroupMemberFW.Builder builder = i.id(m.memberId); + OctetsFW newUserdata = userdata.userdata(); + if (newUserdata.sizeof() > 0) + { + builder.metadataLen(newUserdata.sizeof()).metadata(newUserdata); + } + }); + }))) + .build() + .sizeof())); encoder = encodeSyncGroupRequest; } diff --git a/runtime/binding-kafka/src/main/zilla/protocol.idl b/runtime/binding-kafka/src/main/zilla/protocol.idl index 41461651fc..a7e56952be 100644 --- a/runtime/binding-kafka/src/main/zilla/protocol.idl +++ b/runtime/binding-kafka/src/main/zilla/protocol.idl @@ -536,7 +536,38 @@ scope protocol octets[authBytesLen] authBytes = null; int64 sessionLifetimeMs; } + } + + scope consumer + { + struct ConsumerTopicPartition + { + int32 partitionId; + } + + struct ConsumerTopic + { + string16 topic; + int32 partitionCount; + } + struct ConsumerUserdata + { + uint32 userdataLength; + octets[userdataLength] userdata; + int32 ownedPartitions; + } + + struct ConsumerMetadataTopic + { + string16 name; + } + + struct ConsumerMetadata + { + int16 version; + int32 metadataTopicCount; + } } } } diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java 
b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java index 9740c276bd..20835f64ff 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java @@ -33,6 +33,7 @@ public class CacheConsumerIT { private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/application/group") .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/consumer"); private final TestRule timeout = new DisableOnDebug(new Timeout(15, SECONDS)); @@ -53,7 +54,7 @@ public class CacheConsumerIT @Configuration("cache.when.topic.yaml") @Specification({ "${app}/partition.assignment/client", - "${app}/partition.assignment/server" + "${net}/partition.assignment/server" }) @ScriptProperty("serverAddress \"zilla://streams/app1\"") public void shouldAssignPartition() throws Exception diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientConsumerIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientConsumerIT.java deleted file mode 100644 index e7b552c39d..0000000000 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientConsumerIT.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2021-2023 Aklivity Inc. - * - * Aklivity licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.aklivity.zilla.runtime.binding.kafka.internal.stream; - -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.junit.rules.RuleChain.outerRule; - -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.DisableOnDebug; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.kaazing.k3po.junit.annotation.ScriptProperty; -import org.kaazing.k3po.junit.annotation.Specification; -import org.kaazing.k3po.junit.rules.K3poRule; - -import io.aklivity.zilla.runtime.engine.test.EngineRule; -import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; - -public class ClientConsumerIT -{ - private final K3poRule k3po = new K3poRule() - .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/application/group") - .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/consumer"); - - private final TestRule timeout = new DisableOnDebug(new Timeout(15, SECONDS)); - - private final EngineRule engine = new EngineRule() - .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) - .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") - .external("net0") - .clean(); - - @Rule - public final TestRule chain = outerRule(engine).around(k3po).around(timeout); - - - @Test - @Configuration("client.yaml") - @Specification({ - "${app}/partition.assignment/client", - "${net}/partition.assignment/server"}) - @ScriptProperty("serverAddress \"zilla://streams/net0\"") - public 
void shouldAssignGroupPartition() throws Exception - { - k3po.finish(); - } -} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt index e408dfe143..1665ea0fac 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt @@ -114,25 +114,18 @@ connect await PARTITION_COUNT_2 write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) - .consumer() - .groupId("client-1") - .consumerId("localhost:9092") - .timeout(45000) - .topic("test") - .partition(0) - .partition(1) - .build() + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(45000) + .build() .build()} connected -read zilla:data.ext ${kafka:dataEx() - .typeId(zilla:id("kafka")) - .consumer() - .partition(0) - .assignment("localhost:9092", 0) - .build() - .build()} +read ${kafka:topicAssignment() + .topic("test", 0, "localhost:9092", 0) + .build()} read notify RECEIVED_CONSUMER diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt index a0f622b99c..179148d13d 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt @@ -111,28 +111,20 @@ write flush accepted -read zilla:begin.ext ${kafka:beginEx() - .typeId(zilla:id("kafka")) - .consumer() - .groupId("client-1") - .consumerId("localhost:9092") - .timeout(45000) - .topic("test") - .partition(0) - .partition(1) - .build() - .build()} +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(45000) + .build() + .build()} connected - -write zilla:data.ext ${kafka:dataEx() - .typeId(zilla:id("kafka")) - .consumer() - .partition(0) - .assignment("localhost:9092", 0) - .build() - .build()} +write ${kafka:topicAssignment() + .topic("test", 0, "localhost:9092", 0) + .build()} write flush accepted diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt index f06928bf4b..ed5a69dd49 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt @@ -82,7 +82,7 @@ write 82 # size ${newRequestId} -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -94,7 +94,7 @@ read 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -118,7 +118,7 @@ 
connect await ROUTED_DESCRIBE_SERVER connected -write 105 # size +write 119 # size 11s # join group 5s # v5 ${newRequestId} @@ -131,7 +131,8 @@ write 105 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + ${kafka:randomBytes(14)} # metadata read 34 # size (int:newRequestId) @@ -143,7 +144,7 @@ read 34 # size 10s "memberId-1" # consumer member group id 0 # members -write 115 # size +write 129 # size 11s # join group 5s # v5 ${newRequestId} @@ -156,9 +157,10 @@ write 115 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + ${kafka:randomBytes(14)} # metadata -read 112 # size +read 126 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -169,7 +171,11 @@ read 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions write 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt index 1071dc79f8..8330081a0e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt @@ -72,7 +72,7 @@ read 82 # size (int:requestId) -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -84,7 +84,7 @@ write 103 # size 1 # resources 0s # 
no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -102,7 +102,7 @@ accepted connected -read 105 # size +read 119 # size 11s # join group 5s # v5 (int:newRequestId) @@ -111,11 +111,12 @@ read 105 # size 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member - 42s [0..42] # group instance id + 42s [0..42] # group instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + [0..14] # metadata write 34 # size @@ -128,7 +129,7 @@ write 34 # size 10s "memberId-1" # consumer member group id 0 # members -read 115 # size +read 129 # size 11s # join group 5s # v5 (int:newRequestId) @@ -141,9 +142,10 @@ read 115 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + [0..14] # metadata -write 112 # size +write 126 # size ${newRequestId} 0 # throttle time 0s # no error @@ -154,7 +156,11 @@ write 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions read 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt index b154dda2f3..46e0de6daf 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt @@ -65,7 +65,7 @@ write 82 # size ${newRequestId} -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -77,7 +77,7 @@ read 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -101,7 +101,7 @@ connect await ROUTED_DESCRIBE_SERVER connected -write 105 # size +write 119 # size 11s # join group 5s # v5 ${newRequestId} @@ -114,7 +114,8 @@ write 105 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + ${kafka:randomBytes(14)} # metadata read 24 # size (int:newRequestId) @@ -175,7 +176,7 @@ write 82 # size ${newRequestId} -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -187,7 +188,7 @@ read 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -209,25 +210,25 @@ connect await ROUTED_DESCRIBE_SERVER_SECOND option zilla:transmission "duplex" option zilla:byteorder "network" - connected -write 105 # size - 11s # join group - 5s # v5 - ${newRequestId} - 5s "zilla" # no client id - 4s "test" # consumer group - 30000 # session timeout - 4000 # rebalance timeout - 0s # consumer group member - 42s ${instanceId} # group instance id - 8s "consumer" # protocol type - 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata - -read 112 # size +write 119 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # no client id + 4s 
"test" # consumer group + 30000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 14 # metadata size + ${kafka:randomBytes(14)} # metadata + +read 126 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -236,9 +237,13 @@ read 112 # size 10s "memberId-1" # leader id 10s "memberId-1" # consumer member group id 1 # members - 10s "memberId-1" # consumer member group id - 42s [0..42] # group instance id - 0 # metadata + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions write 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt index 89a6646f74..3354dea382 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt @@ -55,7 +55,7 @@ read 82 # size (int:requestId) -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -67,7 +67,7 @@ write 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -85,7 +85,7 @@ accepted connected -read 105 # size +read 119 # size 11s # join 
group 5s # v5 (int:newRequestId) @@ -97,8 +97,9 @@ read 105 # size 42s [0..42] # group instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + [0..14] # metadata write 24 # size ${newRequestId} @@ -147,7 +148,7 @@ read 82 # size (int:requestId) -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -159,7 +160,7 @@ write 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -177,7 +178,7 @@ accepted connected -read 105 # size +read 119 # size 11s # join group 5s # v5 (int:newRequestId) @@ -190,9 +191,10 @@ read 105 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + [0..14] # metadata -write 112 # size +write 126 # size ${newRequestId} 0 # throttle time 0s # no error @@ -203,7 +205,11 @@ write 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions read 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt index 205d723a8e..089a7589ae 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt @@ -65,7 +65,7 @@ write 82 # size ${newRequestId} -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -77,7 +77,7 @@ read 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -101,7 +101,7 @@ connect await ROUTED_DESCRIBE_SERVER connected -write 105 # size +write 119 # size 11s # join group 5s # v5 ${newRequestId} @@ -113,8 +113,9 @@ write 105 # size 42s ${instanceId} # group instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + ${kafka:randomBytes(14)} # metadata read 34 # size (int:newRequestId) @@ -126,7 +127,7 @@ read 34 # size 10s "memberId-1" # consumer member group id 0 # members -write 115 # size +write 129 # size 11s # join group 5s # v5 ${newRequestId} @@ -138,10 +139,11 @@ write 115 # size 42s ${instanceId} # group instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + ${kafka:randomBytes(14)} # metadata -read 112 # size +read 126 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -152,7 +154,11 @@ read 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions write 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt index e5781e833c..b0489f13c4 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt @@ -55,7 +55,7 @@ read 82 # size (int:requestId) -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -67,7 +67,7 @@ write 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -85,7 +85,7 @@ accepted connected -read 105 # size +read 119 # size 11s # join group 5s # v5 (int:newRequestId) @@ -97,8 +97,9 @@ read 105 # size 42s [0..42] # group instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + [0..14] # metadata write 34 # size @@ -111,7 +112,7 @@ write 34 # size 10s "memberId-1" # consumer member group id 0 # members -read 115 # size +read 129 # size 11s # join group 5s # v5 (int:newRequestId) @@ -124,9 +125,10 @@ read 115 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + [0..14] # metadata -write 112 # size +write 126 # size ${newRequestId} 0 # throttle time 0s # no error @@ -137,7 +139,11 @@ write 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions read 101 
# size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt index 3ffa6e2ca3..3d957289ca 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -65,7 +65,7 @@ write 82 # size ${newRequestId} -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -77,7 +77,7 @@ read 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -101,7 +101,7 @@ connect await ROUTED_DESCRIBE_SERVER_FIRST connected -write 105 # size +write 119 # size 11s # join group 5s # v5 ${newRequestId} @@ -114,9 +114,10 @@ write 105 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + ${kafka:randomBytes(14)} # metadata -read 112 # size +read 126 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -127,7 +128,11 @@ read 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions write 101 # size 14s # sync group @@ -196,7 +201,7 @@ write 82 # size ${newRequestId} -1s # no client id 1 # resources - [0x01] # broker resource + 
[0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -208,7 +213,7 @@ read 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -232,7 +237,7 @@ connect await ROUTED_DESCRIBE_SERVER_SECOND connected -write 115 # size +write 129 # size 11s # join group 5s # v5 ${newRequestId} @@ -245,9 +250,10 @@ write 115 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + ${kafka:randomBytes(14)} # metadata -read 112 # size +read 126 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -258,7 +264,11 @@ read 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions write 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt index 353e127383..cbc7d59c98 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -55,7 +55,7 @@ read 82 # size (int:newRequestId) -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -67,7 +67,7 @@ write 103 # 
size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -85,11 +85,11 @@ accepted connected -read 105 # size +read 119 # size 11s # join group 5s # v5 (int:newRequestId) - 5s "zilla" # no client id + 5s "zilla" # client id 4s "test" # consumer group 30000 # session timeout 4000 # rebalance timeout @@ -97,10 +97,11 @@ read 105 # size 42s [0..42] # group instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + [0..14] # metadata -write 112 # size +write 126 # size ${newRequestId} 0 # throttle time 0s # no error @@ -111,7 +112,11 @@ write 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions read 101 # size 14s # sync group @@ -169,7 +174,7 @@ read 82 # size (int:requestId) -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -181,7 +186,7 @@ write 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -199,22 +204,23 @@ accepted connected -read 115 # size +read 129 # size 11s # join group 5s # v5 (int:newRequestId) - 5s "zilla" # no client id + 5s "zilla" # client id 4s "test" # consumer group 30000 # session timeout 4000 # rebalance timeout - 10s "memberId-1" # consumer group member + 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + [0..14] # metadata -write 112 # size +write 126 # size 
${newRequestId} 0 # throttle time 0s # no error @@ -225,7 +231,11 @@ write 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions read 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt index 094ebb3212..37da5b3dc0 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt @@ -65,7 +65,7 @@ write 82 # size ${newRequestId} -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -77,7 +77,7 @@ read 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -101,7 +101,7 @@ connect await ROUTED_DESCRIBE_SERVER connected -write 105 # size +write 119 # size 11s # join group 5s # v5 ${newRequestId} @@ -113,8 +113,9 @@ write 105 # size 42s ${instanceId} # group instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + ${kafka:randomBytes(14)} # metadata read 24 # size (int:newRequestId) @@ -126,7 +127,7 @@ read 24 # size 0s # consumer 
member group id 0 # members -write 105 # size +write 119 # size 11s # join group 5s # v5 ${newRequestId} @@ -139,9 +140,10 @@ write 105 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + ${kafka:randomBytes(14)} # metadata -read 112 # size +read 126 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -152,7 +154,11 @@ read 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions write 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt index 614a5cd0b6..3177ca86cd 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt @@ -55,7 +55,7 @@ read 82 # size (int:requestId) -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -67,7 +67,7 @@ write 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -85,7 +85,7 @@ accepted connected -read 105 # size +read 119 # size 11s # join group 5s # v5 (int:newRequestId) @@ -97,8 +97,9 @@ read 105 # size 42s [0..42] # group 
instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + [0..14] # metadata write 24 # size @@ -111,11 +112,11 @@ write 24 # size 0s # consumer member group id 0 # members -read 105 # size +read 119 # size 11s # join group 5s # v5 (int:newRequestId) - 5s "zilla" # no client id + 5s "zilla" # client id 4s "test" # consumer group 30000 # session timeout 4000 # rebalance timeout @@ -123,10 +124,11 @@ read 105 # size 42s [0..42] # group instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + [0..14] # metadata -write 112 # size +write 126 # size ${newRequestId} 0 # throttle time 0s # no error @@ -137,7 +139,11 @@ write 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions read 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt index ed4521242e..bef50aaa64 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt @@ -65,7 +65,7 @@ write 82 # size ${newRequestId} -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -77,7 
+77,7 @@ read 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -101,7 +101,7 @@ connect await ROUTED_DESCRIBE_SERVER connected -write 105 # size +write 119 # size 11s # join group 5s # v5 ${newRequestId} @@ -114,7 +114,8 @@ write 105 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + ${kafka:randomBytes(14)} # metadata read 34 # size (int:newRequestId) @@ -126,7 +127,7 @@ read 34 # size 10s "memberId-1" # consumer member group id 0 # members -write 115 # size +write 129 # size 11s # join group 5s # v5 ${newRequestId} @@ -139,9 +140,10 @@ write 115 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + ${kafka:randomBytes(14)} # metadata -read 112 # size +read 126 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -152,7 +154,11 @@ read 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions write 101 # size 14s # sync group @@ -188,7 +194,7 @@ read 10 # size 0 # throttle time 27s # REBALANCE_IN_PROGRESS -write 115 # size +write 129 # size 11s # join group 5s # v5 ${newRequestId} @@ -201,10 +207,11 @@ write 115 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + ${kafka:randomBytes(14)} # metadata -read 170 # size +read 198 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -215,10 +222,18 @@ read 170 # size 2 # members 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions 10s "memberId-2" # consumer member group id 42s [0..42] # group 
instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions write 117 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt index 788be98c4c..f37715afb3 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt @@ -55,7 +55,7 @@ read 82 # size (int:requestId) -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -67,7 +67,7 @@ write 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -85,7 +85,7 @@ accepted connected -read 105 # size +read 119 # size 11s # join group 5s # v5 (int:newRequestId) @@ -97,8 +97,9 @@ read 105 # size 42s [0..42] # group instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + [0..14] # metadata write 34 # size @@ -111,7 +112,7 @@ write 34 # size 10s "memberId-1" # consumer member group id 0 # members -read 115 # size +read 129 # size 11s # join group 5s # v5 (int:newRequestId) @@ -124,9 +125,10 @@ read 115 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + [0..14] # metadata -write 112 # size 
+write 126 # size ${newRequestId} 0 # throttle time 0s # no error @@ -137,7 +139,11 @@ write 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions read 101 # size 14s # sync group @@ -173,7 +179,7 @@ write 10 # size 0 # throttle time 27s # REBALANCE_IN_PROGRESS -read 115 # size +read 129 # size 11s # join group 5s # v5 (int:newRequestId) @@ -186,10 +192,11 @@ read 115 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + [0..14] # metadata -write 170 # size +write 198 # size ${newRequestId} 0 # throttle time 0s # no error @@ -200,10 +207,18 @@ write 170 # size 2 # members 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions 10s "memberId-2" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions read 117 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt index 3f523d8122..f5fe2b5722 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt @@ -65,7 +65,7 @@ write 82 # size ${newRequestId} -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker 
resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -77,7 +77,7 @@ read 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -101,7 +101,7 @@ connect await ROUTED_DESCRIBE_SERVER connected -write 102 # size +write 116 # size 11s # join group 5s # v5 ${newRequestId} @@ -114,9 +114,11 @@ write 102 # size 8s "consumer" # protocol type 1 # group protocol 7s "unknown" # protocol name - 0 # metadata + 14 # metadata size + ${kafka:randomBytes(14)} # metadata -read 109 # size + +read 123 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -127,7 +129,11 @@ read 109 # size 1 # members 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions write 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt index 4c1030709a..d8b52c2c49 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt @@ -55,7 +55,7 @@ read 82 # size (int:requestId) -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -67,7 +67,7 @@ write 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker 
resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -85,7 +85,7 @@ accepted connected -read 102 # size +read 116 # size 11s # join group 5s # v5 (int:newRequestId) @@ -98,20 +98,25 @@ read 102 # size 8s "consumer" # protocol type 1 # group protocol 7s "unknown" # protocol name - 0 # metadata + 14 # metadata size + [0..14] # metadata -write 109 # size +write 123 # size ${newRequestId} 0 # throttle time 0s # no error 3 # generated id - 7s "unknown" # protocol name + 7s "unknown" # protocol name 10s "memberId-1" # leader id 10s "memberId-1" # consumer member group id 1 # members 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions read 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt index 1e7fa59c00..2aaa64152a 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt @@ -65,7 +65,7 @@ write 82 # size ${newRequestId} -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -77,7 +77,7 @@ read 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -101,7 +101,7 @@ connect await ROUTED_DESCRIBE_SERVER connected -write 105 # size 
+write 119 # size 11s # join group 5s # v5 ${newRequestId} @@ -114,9 +114,10 @@ write 105 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + ${kafka:randomBytes(14)} # metadata -read 112 # size +read 126 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -127,7 +128,11 @@ read 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions write 101 # size 14s # sync group @@ -148,7 +153,7 @@ read 14 # size 27s # rebalance 0 # assignment -write 115 # size +write 129 # size 11s # join group 5s # v5 ${newRequestId} @@ -161,9 +166,10 @@ write 115 # size 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + ${kafka:randomBytes(14)} # metadata -read 112 # size +read 126 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -174,7 +180,11 @@ read 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions write 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt index 90912d4823..f4e31e22f9 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt @@ -55,7 +55,7 @@ read 82 # size (int:requestId) -1s # no client id 1 
# resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -67,7 +67,7 @@ write 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -85,11 +85,11 @@ accepted connected -read 105 # size +read 119 # size 11s # join group 5s # v5 (int:newRequestId) - 5s "zilla" # no client id + 5s "zilla" # client id 4s "test" # consumer group 30000 # session timeout 4000 # rebalance timeout @@ -97,10 +97,11 @@ read 105 # size 42s [0..42] # group instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + [0..14] # metadata -write 112 # size +write 126 # size ${newRequestId} 0 # throttle time 0s # no error @@ -111,7 +112,11 @@ write 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions read 101 # size 14s # sync group @@ -132,22 +137,23 @@ write 14 # size 27s # rebalance 0 # assignment -read 115 # size +read 129 # size 11s # join group 5s # v5 (int:newRequestId) - 5s "zilla" # no client id + 5s "zilla" # client id 4s "test" # consumer group 30000 # session timeout 4000 # rebalance timeout - 10s "memberId-1" # consumer group member + 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id 8s "consumer" # protocol type 1 # group protocol 10s "highlander" # protocol name - 0 # metadata + 14 # metadata size + [0..14] # metadata -write 112 # size +write 126 # size ${newRequestId} 0 # throttle time 0s # no error @@ -158,7 +164,11 @@ write 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # 
topics + 0 # userdata + 0 # partitions read 101 # size 14s # sync group diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt index 54c9960f67..239d8459d2 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt @@ -123,7 +123,7 @@ write 82 # size ${newRequestId} -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -135,7 +135,7 @@ read 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -188,7 +188,7 @@ read 20 # size -1s # authentication bytes 0L # session lifetime -write 105 # size +write 119 # size 11s # join group 5s # v5 ${newRequestId} @@ -200,10 +200,11 @@ write 105 # size 42s ${instanceId} # group instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + ${kafka:randomBytes(14)} # metadata -read 112 # size +read 126 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -214,7 +215,11 @@ read 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s [0..42] # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions write 101 # size 14s # sync group diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt index 3b7326fcfb..77e185fbff 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt @@ -113,7 +113,7 @@ read 82 # size (int:requestId) -1s # no client id 1 # resources - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "node" topic 2 # configs 28s "group.min.session.timeout.ms" # name @@ -125,7 +125,7 @@ write 103 # size 1 # resources 0s # no error -1s # error message - [0x01] # broker resource + [0x04] # broker resource 1s "0" # "0" nodeId 2 # configs 28s "group.min.session.timeout.ms" # name @@ -173,7 +173,7 @@ write 20 # size -1s # authentication bytes 0L # session lifetime -read 105 # size +read 119 # size 11s # join group 5s # v5 (int:newRequestId) @@ -182,15 +182,15 @@ read 105 # size 30000 # session timeout 4000 # rebalance timeout 0s # consumer group member - 42s [0..42] # group instance id + 42s [0..42] # group instance id 8s "consumer" # protocol type 1 # group protocol - 10s "highlander" # protocol name - 0 # metadata + 10s "highlander" # protocol name + 14 # metadata size + [0..14] # metadata - -write 112 # size +write 126 # size ${newRequestId} 0 # throttle time 0s # no error @@ -201,7 +201,11 @@ write 112 # size 1 # members 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id - 0 # metadata + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions read 101 # size 14s # sync group From 
edbfba11d1e757292f2f59c94a58f5bf21fce718 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Tue, 12 Sep 2023 08:57:14 -0700 Subject: [PATCH 081/115] Remove unused extends OptionsConfig from non-options config classes (#403) --- .../runtime/binding/amqp/internal/config/AmqpRouteConfig.java | 3 +-- .../runtime/binding/mqtt/internal/config/MqttRouteConfig.java | 3 +-- .../runtime/exporter/otlp/config/OtlpEndpointConfig.java | 4 +--- .../runtime/exporter/otlp/config/OtlpOverridesConfig.java | 4 +--- .../grpc/kafka/internal/config/GrpcKafkaRouteConfig.java | 3 +-- .../runtime/binding/grpc/internal/config/GrpcRouteConfig.java | 3 +-- .../filesystem/internal/config/HttpFileSystemRouteConfig.java | 3 +-- .../http/kafka/internal/config/HttpKafkaRouteConfig.java | 3 +-- .../runtime/binding/http/internal/config/HttpRouteConfig.java | 3 +-- .../kafka/grpc/internal/config/KafkaGrpcRouteConfig.java | 3 +-- .../binding/kafka/internal/config/KafkaRouteConfig.java | 3 +-- .../sse/kafka/internal/config/SseKafkaRouteConfig.java | 3 +-- .../runtime/binding/sse/internal/config/SseRouteConfig.java | 3 +-- .../runtime/binding/tls/internal/config/TlsRouteConfig.java | 3 +-- .../runtime/binding/ws/internal/config/WsRouteConfig.java | 3 +-- .../prometheus/internal/config/PrometheusEndpointConfig.java | 4 +--- 16 files changed, 16 insertions(+), 35 deletions(-) diff --git a/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpRouteConfig.java b/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpRouteConfig.java index 017bd58877..c2041c48e7 100644 --- a/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpRouteConfig.java +++ b/incubator/binding-amqp/src/main/java/io/aklivity/zilla/runtime/binding/amqp/internal/config/AmqpRouteConfig.java @@ -22,10 +22,9 @@ import io.aklivity.zilla.runtime.binding.amqp.config.AmqpConditionConfig; import 
io.aklivity.zilla.runtime.binding.amqp.internal.types.AmqpCapabilities; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; -public final class AmqpRouteConfig extends OptionsConfig +public final class AmqpRouteConfig { public final long id; diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java index be2e40fda1..1f5f2f131d 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java @@ -22,10 +22,9 @@ import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfig; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; -public final class MqttRouteConfig extends OptionsConfig +public final class MqttRouteConfig { public final long id; diff --git a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpEndpointConfig.java b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpEndpointConfig.java index 6b7eea006b..78b507b7d1 100644 --- a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpEndpointConfig.java +++ b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpEndpointConfig.java @@ -16,9 +16,7 @@ import java.net.URI; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; - -public class OtlpEndpointConfig extends OptionsConfig +public class OtlpEndpointConfig { public String protocol; public URI location; diff --git 
a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpOverridesConfig.java b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpOverridesConfig.java index 53a5b5596e..7fa6fc009d 100644 --- a/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpOverridesConfig.java +++ b/incubator/exporter-otlp/src/main/java/io/aklivity/zilla/runtime/exporter/otlp/config/OtlpOverridesConfig.java @@ -16,9 +16,7 @@ import java.net.URI; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; - -public class OtlpOverridesConfig extends OptionsConfig +public class OtlpOverridesConfig { public URI metrics; diff --git a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaRouteConfig.java b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaRouteConfig.java index 6cd0342905..a04656541d 100644 --- a/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaRouteConfig.java +++ b/runtime/binding-grpc-kafka/src/main/java/io/aklivity/zilla/runtime/binding/grpc/kafka/internal/config/GrpcKafkaRouteConfig.java @@ -29,12 +29,11 @@ import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.String16FW; import io.aklivity.zilla.runtime.binding.grpc.kafka.internal.types.stream.GrpcMetadataFW; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; import io.aklivity.zilla.runtime.engine.util.function.LongObjectBiFunction; -public final class GrpcKafkaRouteConfig extends OptionsConfig +public final class GrpcKafkaRouteConfig { public final long id; diff --git 
a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcRouteConfig.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcRouteConfig.java index 15b2b8d5e2..3c20c81ba6 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcRouteConfig.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/config/GrpcRouteConfig.java @@ -22,10 +22,9 @@ import io.aklivity.zilla.runtime.binding.grpc.config.GrpcConditionConfig; import io.aklivity.zilla.runtime.binding.grpc.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.grpc.internal.types.stream.GrpcMetadataFW; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; -public final class GrpcRouteConfig extends OptionsConfig +public final class GrpcRouteConfig { public final long id; diff --git a/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemRouteConfig.java b/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemRouteConfig.java index ae18b63032..457f18fa73 100644 --- a/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemRouteConfig.java +++ b/runtime/binding-http-filesystem/src/main/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/config/HttpFileSystemRouteConfig.java @@ -22,10 +22,9 @@ import java.util.function.LongPredicate; import io.aklivity.zilla.runtime.binding.http.filesystem.config.HttpFileSystemConditionConfig; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; -public final class HttpFileSystemRouteConfig extends OptionsConfig +public final class 
HttpFileSystemRouteConfig { public final long id; public final Optional with; diff --git a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaRouteConfig.java b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaRouteConfig.java index f1d554eb22..0fe8a77de5 100644 --- a/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaRouteConfig.java +++ b/runtime/binding-http-kafka/src/main/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/config/HttpKafkaRouteConfig.java @@ -26,11 +26,10 @@ import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaConditionConfig; import io.aklivity.zilla.runtime.binding.http.kafka.config.HttpKafkaOptionsConfig; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; import io.aklivity.zilla.runtime.engine.util.function.LongObjectBiFunction; -public final class HttpKafkaRouteConfig extends OptionsConfig +public final class HttpKafkaRouteConfig { public final long id; public final HttpKafkaWithResolver with; diff --git a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRouteConfig.java b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRouteConfig.java index 1249894a7e..569086176a 100644 --- a/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRouteConfig.java +++ b/runtime/binding-http/src/main/java/io/aklivity/zilla/runtime/binding/http/internal/config/HttpRouteConfig.java @@ -22,10 +22,9 @@ import java.util.function.LongPredicate; import io.aklivity.zilla.runtime.binding.http.config.HttpConditionConfig; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; -public final class 
HttpRouteConfig extends OptionsConfig +public final class HttpRouteConfig { public final long id; diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcRouteConfig.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcRouteConfig.java index 9216569441..d95db803e0 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcRouteConfig.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/config/KafkaGrpcRouteConfig.java @@ -21,10 +21,9 @@ import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcConditionConfig; import io.aklivity.zilla.runtime.binding.kafka.grpc.config.KafkaGrpcOptionsConfig; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; -public final class KafkaGrpcRouteConfig extends OptionsConfig +public final class KafkaGrpcRouteConfig { public final long id; public final KafkaGrpcWithConfig with; diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaRouteConfig.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaRouteConfig.java index ec6d5f54cc..30a21d5d03 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaRouteConfig.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/config/KafkaRouteConfig.java @@ -21,10 +21,9 @@ import java.util.function.LongPredicate; import io.aklivity.zilla.runtime.binding.kafka.config.KafkaConditionConfig; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; -public final class KafkaRouteConfig extends OptionsConfig +public 
final class KafkaRouteConfig { public final long id; public final KafkaWithConfig with; diff --git a/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaRouteConfig.java b/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaRouteConfig.java index b28a8b938a..abbee580bb 100644 --- a/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaRouteConfig.java +++ b/runtime/binding-sse-kafka/src/main/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/config/SseKafkaRouteConfig.java @@ -26,11 +26,10 @@ import java.util.stream.Collectors; import io.aklivity.zilla.runtime.binding.sse.kafka.config.SseKafkaConditionConfig; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; import io.aklivity.zilla.runtime.engine.util.function.LongObjectBiFunction; -public final class SseKafkaRouteConfig extends OptionsConfig +public final class SseKafkaRouteConfig { public final long id; public final Optional with; diff --git a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseRouteConfig.java b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseRouteConfig.java index 1a0f3388e8..2b5e0f25a8 100644 --- a/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseRouteConfig.java +++ b/runtime/binding-sse/src/main/java/io/aklivity/zilla/runtime/binding/sse/internal/config/SseRouteConfig.java @@ -21,10 +21,9 @@ import java.util.function.LongPredicate; import io.aklivity.zilla.runtime.binding.sse.config.SseConditionConfig; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; -public final class SseRouteConfig extends OptionsConfig +public final class SseRouteConfig { public final 
long id; diff --git a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsRouteConfig.java b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsRouteConfig.java index 1dd7a45e76..180d8abc8d 100644 --- a/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsRouteConfig.java +++ b/runtime/binding-tls/src/main/java/io/aklivity/zilla/runtime/binding/tls/internal/config/TlsRouteConfig.java @@ -21,10 +21,9 @@ import java.util.function.LongPredicate; import io.aklivity.zilla.runtime.binding.tls.config.TlsConditionConfig; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; -public final class TlsRouteConfig extends OptionsConfig +public final class TlsRouteConfig { public final long id; diff --git a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsRouteConfig.java b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsRouteConfig.java index 5515051730..eeadbc1c9b 100644 --- a/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsRouteConfig.java +++ b/runtime/binding-ws/src/main/java/io/aklivity/zilla/runtime/binding/ws/internal/config/WsRouteConfig.java @@ -21,10 +21,9 @@ import java.util.function.LongPredicate; import io.aklivity.zilla.runtime.binding.ws.config.WsConditionConfig; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; -public final class WsRouteConfig extends OptionsConfig +public final class WsRouteConfig { public final long id; public final int order; diff --git a/runtime/exporter-prometheus/src/main/java/io/aklivity/zilla/runtime/exporter/prometheus/internal/config/PrometheusEndpointConfig.java 
b/runtime/exporter-prometheus/src/main/java/io/aklivity/zilla/runtime/exporter/prometheus/internal/config/PrometheusEndpointConfig.java index 4e05c2d7aa..c8a04843e6 100644 --- a/runtime/exporter-prometheus/src/main/java/io/aklivity/zilla/runtime/exporter/prometheus/internal/config/PrometheusEndpointConfig.java +++ b/runtime/exporter-prometheus/src/main/java/io/aklivity/zilla/runtime/exporter/prometheus/internal/config/PrometheusEndpointConfig.java @@ -14,9 +14,7 @@ */ package io.aklivity.zilla.runtime.exporter.prometheus.internal.config; -import io.aklivity.zilla.runtime.engine.config.OptionsConfig; - -public class PrometheusEndpointConfig extends OptionsConfig +public class PrometheusEndpointConfig { public String scheme; public int port; From f2db3142da9bf883b58d472a6f977b5779b0625a Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Wed, 13 Sep 2023 11:05:02 -0700 Subject: [PATCH 082/115] Consumer related bug fixes (#405) --- .../command/log/internal/LoggableStream.java | 45 +++ .../KafkaCacheClientConsumerFactory.java | 117 +++--- .../KafkaCacheServerConsumerFactory.java | 171 +++++++-- .../stream/KafkaClientGroupFactory.java | 57 ++- .../internal/stream/CacheConsumerIT.java | 12 + .../kafka/internal/KafkaFunctions.java | 347 ++++++++++++++---- .../consumer/partition.assignment/client.rpt | 5 +- .../consumer/partition.assignment/server.rpt | 5 +- .../consumer/reassign.new.topic/client.rpt | 83 +++++ .../consumer/reassign.new.topic/server.rpt | 89 +++++ .../group/partition.assignment/client.rpt | 34 +- .../group/partition.assignment/server.rpt | 32 +- .../group/reassign.new.topic/client.rpt | 183 +++++++++ .../group/reassign.new.topic/server.rpt | 188 ++++++++++ .../client.rpt | 56 ++- .../server.rpt | 58 ++- .../kafka/internal/KafkaFunctionsTest.java | 42 ++- .../kafka/streams/application/ConsumerIT.java | 9 + .../kafka/streams/application/GroupIT.java | 9 + 19 files changed, 1357 insertions(+), 185 deletions(-) create mode 100644 
specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/server.rpt diff --git a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java index d8621a9f2e..a981f005f6 100644 --- a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java +++ b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java @@ -78,6 +78,9 @@ import io.aklivity.zilla.runtime.command.log.internal.types.stream.HttpFlushExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaBeginExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaBootstrapBeginExFW; +import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaConsumerAssignmentFW; +import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaConsumerBeginExFW; +import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaConsumerDataExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaDataExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaDescribeBeginExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaDescribeDataExFW; @@ -95,6 +98,7 @@ import 
io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaProduceBeginExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaProduceDataExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaResetExFW; +import io.aklivity.zilla.runtime.command.log.internal.types.stream.KafkaTopicPartitionFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttDataExFW; import io.aklivity.zilla.runtime.command.log.internal.types.stream.MqttFlushExFW; @@ -890,6 +894,9 @@ private void onKafkaBeginEx( case KafkaBeginExFW.KIND_GROUP: onKafkaGroupBeginEx(offset, timestamp, kafkaBeginEx.group()); break; + case KafkaBeginExFW.KIND_CONSUMER: + onKafkaConsumerBeginEx(offset, timestamp, kafkaBeginEx.consumer()); + break; case KafkaBeginExFW.KIND_FETCH: onKafkaFetchBeginEx(offset, timestamp, kafkaBeginEx.fetch()); break; @@ -959,6 +966,22 @@ private void onKafkaGroupBeginEx( groupId.asString(), protocol.asString(), timeout)); } + private void onKafkaConsumerBeginEx( + int offset, + long timestamp, + KafkaConsumerBeginExFW consumer) + { + String16FW groupId = consumer.groupId(); + String16FW consumerId = consumer.consumerId(); + String16FW topic = consumer.topic(); + int timeout = consumer.timeout(); + Array32FW partitions = consumer.partitionIds(); + + out.printf(verboseFormat, index, offset, timestamp, format("[consumer] %s %s %s %d", + groupId.asString(), consumerId.asString(), topic.asString(), timeout)); + partitions.forEach(p -> out.printf(verboseFormat, index, offset, timestamp, format("%d", p.partitionId()))); + } + private void onKafkaFetchBeginEx( int offset, long timestamp, @@ -1078,6 +1101,9 @@ private void onKafkaDataEx( case KafkaDataExFW.KIND_FETCH: onKafkaFetchDataEx(offset, timestamp, kafkaDataEx.fetch()); break; + case KafkaDataExFW.KIND_CONSUMER: + onKafkaConsumerDataEx(offset, timestamp, kafkaDataEx.consumer()); + break; 
case KafkaDataExFW.KIND_MERGED: onKafkaMergedDataEx(offset, timestamp, kafkaDataEx.merged()); break; @@ -1122,6 +1148,25 @@ private void onKafkaFetchDataEx( format("%s: %s", asString(h.name()), asString(h.value())))); } + private void onKafkaConsumerDataEx( + int offset, + long timestamp, + KafkaConsumerDataExFW consumer) + { + Array32FW partitions = consumer.partitions(); + Array32FW assignments = consumer.assignments(); + + out.printf(verboseFormat, index, offset, timestamp, "[consumer]"); + partitions.forEach(p -> out.printf(verboseFormat, index, offset, timestamp, + format("%d", p.partitionId()))); + assignments.forEach(a -> + { + out.printf(verboseFormat, index, offset, timestamp, a.consumerId().asString()); + a.partitions().forEach(p -> + out.printf(verboseFormat, index, offset, timestamp, format("%d", p.partitionId()))); + }); + } + private void onKafkaMergedDataEx( int offset, long timestamp, diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java index 7bf4d689c2..67c5f7b8b6 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java @@ -93,7 +93,7 @@ public final class KafkaCacheClientConsumerFactory implements BindingHandler private final LongFunction supplyLocalName; private final LongFunction supplyBinding; - private final Object2ObjectHashMap clientConsumerFansByGroupId; + private final Object2ObjectHashMap clientConsumerFansByConsumer; public KafkaCacheClientConsumerFactory( KafkaConfiguration config, @@ -112,7 +112,7 @@ public KafkaCacheClientConsumerFactory( this.supplyNamespace = context::supplyNamespace; 
this.supplyLocalName = context::supplyLocalName; this.supplyBinding = supplyBinding; - this.clientConsumerFansByGroupId = new Object2ObjectHashMap<>(); + this.clientConsumerFansByConsumer = new Object2ObjectHashMap<>(); } @Override @@ -154,19 +154,20 @@ public MessageConsumer newStream( { final long resolvedId = resolved.id; - KafkaCacheClientConsumerFanout fanout = clientConsumerFansByGroupId.get(groupId); + String fanKey = String.format("%s-%s-%s-%d", groupId, topic, consumerId, resolvedId); + KafkaCacheClientConsumerFan fan = clientConsumerFansByConsumer.get(fanKey); - if (fanout == null) + if (fan == null) { - KafkaCacheClientConsumerFanout newFanout = - new KafkaCacheClientConsumerFanout(routedId, resolvedId, authorization, groupId, + KafkaCacheClientConsumerFan newFan = + new KafkaCacheClientConsumerFan(routedId, resolvedId, authorization, groupId, topic, consumerId, partitions, timeout); - fanout = newFanout; - clientConsumerFansByGroupId.put(groupId, fanout); + fan = newFan; + clientConsumerFansByConsumer.put(fanKey, fan); } newStream = new KafkaCacheClientConsumerStream( - fanout, + fan, sender, originId, routedId, @@ -266,7 +267,7 @@ private void doDataNull( .authorization(authorization) .budgetId(budgetId) .reserved(reserved) - .extension(extension.buffer(), extension.offset(), extension.limit()) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) .build(); receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); @@ -380,7 +381,7 @@ private void doReset( sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); } - final class KafkaCacheClientConsumerFanout + final class KafkaCacheClientConsumerFan { private final long originId; private final long routedId; @@ -409,7 +410,7 @@ final class KafkaCacheClientConsumerFanout private int replyMax; - private KafkaCacheClientConsumerFanout( + private KafkaCacheClientConsumerFan( long originId, long routedId, long authorization, @@ -432,7 +433,7 @@ 
private KafkaCacheClientConsumerFanout( this.assignments = new Object2ObjectHashMap<>(); } - private void onConsumerFanoutMemberOpening( + private void onConsumerFanMemberOpening( long traceId, KafkaCacheClientConsumerStream member) { @@ -440,7 +441,7 @@ private void onConsumerFanoutMemberOpening( assert !members.isEmpty(); - doConsumerFanoutInitialBeginIfNecessary(traceId); + doConsumerFanInitialBeginIfNecessary(traceId); if (KafkaState.initialOpened(state)) { @@ -453,7 +454,7 @@ private void onConsumerFanoutMemberOpening( } } - private void onConsumerFanoutMemberOpened( + private void onConsumerFanMemberOpened( long traceId, KafkaCacheClientConsumerStream member) { @@ -474,7 +475,7 @@ private void onConsumerFanoutMemberOpened( } } - private void onConsumerFanoutMemberClosed( + private void onConsumerFanMemberClosed( long traceId, KafkaCacheClientConsumerStream member) { @@ -482,12 +483,12 @@ private void onConsumerFanoutMemberClosed( if (members.isEmpty()) { - doConsumerFanoutInitialEndIfNecessary(traceId); - doConsumerFanoutReplyResetIfNecessary(traceId); + doConsumerFanInitialEndIfNecessary(traceId); + doConsumerFanReplyResetIfNecessary(traceId); } } - private void doConsumerFanoutInitialBeginIfNecessary( + private void doConsumerFanInitialBeginIfNecessary( long traceId) { if (KafkaState.closed(state)) @@ -497,18 +498,18 @@ private void doConsumerFanoutInitialBeginIfNecessary( if (!KafkaState.initialOpening(state)) { - doConsumerFanoutInitialBegin(traceId); + doConsumerFanInitialBegin(traceId); } } - private void doConsumerFanoutInitialBegin( + private void doConsumerFanInitialBegin( long traceId) { assert state == 0; this.initialId = supplyInitialId.applyAsLong(routedId); this.replyId = supplyReplyId.applyAsLong(initialId); - this.receiver = newStream(this::onConsumerFanoutMessage, + this.receiver = newStream(this::onConsumerFanMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization, 0L, ex -> ex.set((b, o, l) -> 
kafkaBeginExRW.wrap(b, o, l) @@ -523,16 +524,16 @@ private void doConsumerFanoutInitialBegin( state = KafkaState.openingInitial(state); } - private void doConsumerFanoutInitialEndIfNecessary( + private void doConsumerFanInitialEndIfNecessary( long traceId) { if (!KafkaState.initialClosed(state)) { - doConsumerFanoutInitialEnd(traceId); + doConsumerFanInitialEnd(traceId); } } - private void doConsumerFanoutInitialEnd( + private void doConsumerFanInitialEnd( long traceId) { doEnd(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, @@ -541,16 +542,16 @@ private void doConsumerFanoutInitialEnd( state = KafkaState.closedInitial(state); } - private void doConsumerFanoutInitialAbortIfNecessary( + private void doConsumerFanInitialAbortIfNecessary( long traceId) { if (!KafkaState.initialClosed(state)) { - doConsumerFanoutInitialAbort(traceId); + doConsumerFanInitialAbort(traceId); } } - private void doConsumerFanoutInitialAbort( + private void doConsumerFanInitialAbort( long traceId) { doAbort(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, @@ -559,7 +560,7 @@ private void doConsumerFanoutInitialAbort( state = KafkaState.closedInitial(state); } - private void onConsumerFanoutInitialReset( + private void onConsumerFanInitialReset( ResetFW reset) { final long traceId = reset.traceId(); @@ -570,12 +571,12 @@ private void onConsumerFanoutInitialReset( state = KafkaState.closedInitial(state); - doConsumerFanoutReplyResetIfNecessary(traceId); + doConsumerFanReplyResetIfNecessary(traceId); members.forEach(s -> s.doConsumerInitialResetIfNecessary(traceId)); } - private void onConsumerFanoutInitialWindow( + private void onConsumerFanInitialWindow( WindowFW window) { if (!KafkaState.initialOpened(state)) @@ -589,7 +590,7 @@ private void onConsumerFanoutInitialWindow( } } - private void onConsumerFanoutMessage( + private void onConsumerFanMessage( int msgTypeId, DirectBuffer buffer, int index, @@ -599,34 +600,34 @@ private void 
onConsumerFanoutMessage( { case BeginFW.TYPE_ID: final BeginFW begin = beginRO.wrap(buffer, index, index + length); - onConsumerFanoutReplyBegin(begin); + onConsumerFanReplyBegin(begin); break; case DataFW.TYPE_ID: final DataFW data = dataRO.wrap(buffer, index, index + length); - onConsumerFanoutReplyData(data); + onConsumerFanReplyData(data); break; case EndFW.TYPE_ID: final EndFW end = endRO.wrap(buffer, index, index + length); - onConsumerFanoutReplyEnd(end); + onConsumerFanReplyEnd(end); break; case AbortFW.TYPE_ID: final AbortFW abort = abortRO.wrap(buffer, index, index + length); - onConsumerFanoutReplyAbort(abort); + onConsumerFanReplyAbort(abort); break; case ResetFW.TYPE_ID: final ResetFW reset = resetRO.wrap(buffer, index, index + length); - onConsumerFanoutInitialReset(reset); + onConsumerFanInitialReset(reset); break; case WindowFW.TYPE_ID: final WindowFW window = windowRO.wrap(buffer, index, index + length); - onConsumerFanoutInitialWindow(window); + onConsumerFanInitialWindow(window); break; default: break; } } - private void onConsumerFanoutReplyBegin( + private void onConsumerFanReplyBegin( BeginFW begin) { final long traceId = begin.traceId(); @@ -635,10 +636,10 @@ private void onConsumerFanoutReplyBegin( members.forEach(s -> s.doConsumerReplyBeginIfNecessary(traceId)); - doConsumerFanoutReplyWindow(traceId, 0, bufferPool.slotCapacity()); + doConsumerFanReplyWindow(traceId, 0, bufferPool.slotCapacity()); } - private void onConsumerFanoutReplyData( + private void onConsumerFanReplyData( DataFW data) { final long sequence = data.sequence(); @@ -679,43 +680,43 @@ private void onConsumerFanoutReplyData( members.forEach(s -> s.doConsumerReplyDataIfNecessary(traceId, kafkaDataEx)); } - doConsumerFanoutReplyWindow(traceId, 0, replyMax); + doConsumerFanReplyWindow(traceId, 0, replyMax); } - private void onConsumerFanoutReplyEnd( + private void onConsumerFanReplyEnd( EndFW end) { final long traceId = end.traceId(); state = KafkaState.closedReply(state); - 
doConsumerFanoutInitialEndIfNecessary(traceId); + doConsumerFanInitialEndIfNecessary(traceId); members.forEach(s -> s.doConsumerReplyEndIfNecessary(traceId)); } - private void onConsumerFanoutReplyAbort( + private void onConsumerFanReplyAbort( AbortFW abort) { final long traceId = abort.traceId(); state = KafkaState.closedReply(state); - doConsumerFanoutInitialAbortIfNecessary(traceId); + doConsumerFanInitialAbortIfNecessary(traceId); members.forEach(s -> s.doConsumerReplyAbortIfNecessary(traceId)); } - private void doConsumerFanoutReplyResetIfNecessary( + private void doConsumerFanReplyResetIfNecessary( long traceId) { if (!KafkaState.replyClosed(state)) { - doConsumerFanoutReplyReset(traceId); + doConsumerFanReplyReset(traceId); } } - private void doConsumerFanoutReplyReset( + private void doConsumerFanReplyReset( long traceId) { doReset(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, @@ -724,7 +725,7 @@ private void doConsumerFanoutReplyReset( state = KafkaState.closedReply(state); } - private void doConsumerFanoutReplyWindow( + private void doConsumerFanReplyWindow( long traceId, int minReplyNoAck, int minReplyMax) @@ -748,7 +749,7 @@ private void doConsumerFanoutReplyWindow( private final class KafkaCacheClientConsumerStream { - private final KafkaCacheClientConsumerFanout group; + private final KafkaCacheClientConsumerFan fan; private final MessageConsumer sender; private final long originId; private final long routedId; @@ -771,7 +772,7 @@ private final class KafkaCacheClientConsumerStream private long replyBudgetId; KafkaCacheClientConsumerStream( - KafkaCacheClientConsumerFanout group, + KafkaCacheClientConsumerFan fan, MessageConsumer sender, long originId, long routedId, @@ -779,7 +780,7 @@ private final class KafkaCacheClientConsumerStream long affinity, long authorization) { - this.group = group; + this.fan = fan; this.sender = sender; this.originId = originId; this.routedId = routedId; @@ -829,7 +830,7 @@ private void 
onConsumerInitialBegin( state = KafkaState.openingInitial(state); - group.onConsumerFanoutMemberOpening(traceId, this); + fan.onConsumerFanMemberOpening(traceId, this); } private void onConsumerInitialEnd( @@ -839,7 +840,7 @@ private void onConsumerInitialEnd( state = KafkaState.closedInitial(state); - group.onConsumerFanoutMemberClosed(traceId, this); + fan.onConsumerFanMemberClosed(traceId, this); doConsumerReplyEndIfNecessary(traceId); } @@ -851,7 +852,7 @@ private void onConsumerInitialAbort( state = KafkaState.closedInitial(state); - group.onConsumerFanoutMemberClosed(traceId, this); + fan.onConsumerFanMemberClosed(traceId, this); doConsumerReplyAbortIfNecessary(traceId); } @@ -984,7 +985,7 @@ private void onConsumerReplyReset( state = KafkaState.closedInitial(state); - group.onConsumerFanoutMemberClosed(traceId, this); + fan.onConsumerFanMemberClosed(traceId, this); doConsumerInitialResetIfNecessary(traceId); } @@ -1015,7 +1016,7 @@ private void onConsumerReplyWindow( state = KafkaState.openedReply(state); final long traceId = window.traceId(); - group.onConsumerFanoutMemberOpened(traceId, this); + fan.onConsumerFanMemberOpened(traceId, this); } } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java index bb82dff173..f658b13867 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java @@ -47,6 +47,7 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFlushExFW; import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupFlushExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupMemberFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupMemberMetadataFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupTopicMetadataFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; @@ -144,7 +145,8 @@ public MessageConsumer newStream( final KafkaConsumerBeginExFW kafkaConsumerBeginEx = kafkaBeginEx.consumer(); final String groupId = kafkaConsumerBeginEx.groupId().asString(); final String topic = kafkaConsumerBeginEx.topic().asString(); - final String consumerId = kafkaConsumerBeginEx.consumerId().asString(); + String consumerId = kafkaConsumerBeginEx.consumerId().asString(); + consumerId = consumerId != null ? consumerId : ""; final int timeout = kafkaConsumerBeginEx.timeout(); final List partitions = new ArrayList<>(); kafkaConsumerBeginEx.partitionIds().forEach(p -> partitions.add(p.partitionId())); @@ -524,11 +526,11 @@ final class KafkaCacheServerConsumerFanout private final long originId; private final long routedId; private final long authorization; - private final int timeout; private final List streams; private final Object2ObjectHashMap members; private final Object2ObjectHashMap partitionsByTopic; - private final Object2ObjectHashMap> assignment; + private final Object2ObjectHashMap> consumers; + private final Object2ObjectHashMap assignments; private long initialId; private long replyId; @@ -547,6 +549,7 @@ final class KafkaCacheServerConsumerFanout private int replyPad; private String leaderId; private String memberId; + private int timeout; private KafkaCacheServerConsumerFanout( @@ -566,11 +569,55 @@ private KafkaCacheServerConsumerFanout( this.streams = new ArrayList<>(); this.members = new Object2ObjectHashMap<>(); this.partitionsByTopic = new Object2ObjectHashMap<>(); - this.assignment = 
new Object2ObjectHashMap<>(); + this.consumers = new Object2ObjectHashMap<>(); + this.assignments = new Object2ObjectHashMap<>(); + } + + private void onConsumerFanoutStreamOpening( + long traceId, + KafkaCacheServerConsumerStream stream) + { + streams.add(stream); + + assert !streams.isEmpty(); + + doConsumerInitialBegin(traceId, stream); + + if (KafkaState.initialOpened(state)) + { + stream.doConsumerInitialWindow(authorization, traceId, initialBud, 0); + } + + if (KafkaState.replyOpened(state)) + { + stream.doConsumerReplyBegin(traceId); + } + } + + private void onConsumerFanoutMemberOpened( + long traceId, + KafkaCacheServerConsumerStream stream) + { + final TopicConsumer topicConsumer = assignments.get(stream.topic); + if (topicConsumer != null) + { + stream.doConsumerReplyData(traceId, 3, replyPad, EMPTY_OCTETS, + ex -> ex.set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .consumer(c -> c.partitions(p -> topicConsumer + .partitions.forEach(np -> p.item(tp -> tp.partitionId(np)))) + .assignments(a -> topicConsumer.consumers.forEach(u -> + a.item(ua -> ua.consumerId(u.consumerId).partitions(p -> u.partitions + .forEach(np -> + p.item(tp -> tp.partitionId(np)))))))) + .build() + .sizeof())); + } } private void doConsumerInitialBegin( - long traceId) + long traceId, + KafkaCacheServerConsumerStream stream) { if (KafkaState.closed(state)) { @@ -605,6 +652,34 @@ private void doConsumerInitialBegin( .build().sizeof())); state = KafkaState.openingInitial(state); } + else if (!assignments.containsKey(stream.topic)) + { + doConsumerInitialFlush(traceId, + ex -> ex.set((b, o, l) -> kafkaFlushExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .group(g -> g.leaderId(leaderId) + .memberId(memberId) + .members(gm -> + { + KafkaGroupMemberMetadataFW metadata = kafkaGroupMemberMetadataRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .consumerId(consumerId) + .topics(t -> streams.forEach(s -> t.item(tp -> tp + .topic(s.topic) + .partitions(p -> 
s.partitions.forEach(sp -> + p.item(gtp -> gtp.partitionId(sp))))))) + .build(); + + gm.item(i -> + { + KafkaGroupMemberFW.Builder builder = i.id(memberId); + builder.metadataLen(metadata.sizeof()) + .metadata(metadata.buffer(), 0, metadata.sizeof()); + }); + })) + .build() + .sizeof())); + } } private void doConsumerInitialData( @@ -751,9 +826,9 @@ private void onConsumerReplyBegin( { final long traceId = begin.traceId(); - state = KafkaState.openingReply(state); + state = KafkaState.openedReply(state); - streams.forEach(m -> m.doConsumerReplyBegin(traceId, begin.extension())); + streams.forEach(m -> m.doConsumerReplyBegin(traceId)); } private void onConsumerReplyFlush( @@ -794,16 +869,15 @@ private void onConsumerReplyFlush( .wrap(metadata.buffer(), metadata.offset(), metadata.limit()); final String consumerId = kafkaGroupMemberMetadataRO.consumerId().asString(); + final String mId = m.id().asString(); + members.put(mId, consumerId); + groupMetadata.topics().forEach(mt -> { - final String mId = m.id().asString(); - members.put(mId, consumerId); - final String topic = mt.topic().asString(); IntHashSet partitions = partitionsByTopic.computeIfAbsent(topic, s -> new IntHashSet()); mt.partitions().forEach(p -> partitions.add(p.partitionId())); }); - }); } @@ -832,22 +906,41 @@ private void onConsumerReplyData( Array32FW topicAssignments = topicAssignmentsRO .wrap(payload.buffer(), payload.offset(), payload.limit()); + this.assignments.clear(); + topicAssignments.forEach(ta -> { KafkaCacheServerConsumerStream stream = streams.stream().filter(s -> s.topic.equals(ta.topic().asString())).findFirst().get(); + IntHashSet partitions = new IntHashSet(); + List topicConsumers = new ArrayList<>(); stream.doConsumerReplyData(traceId, flags, replyPad, EMPTY_OCTETS, ex -> ex.set((b, o, l) -> kafkaDataExRW.wrap(b, o, l) .typeId(kafkaTypeId) .consumer(c -> c.partitions(p -> ta - .partitions() - .forEach(np -> p.item(tp -> tp.partitionId(np.partitionId())))) + .partitions() + 
.forEach(np -> + { + partitions.add(np.partitionId()); + p.item(tp -> tp.partitionId(np.partitionId())); + })) .assignments(a -> ta.userdata().forEach(u -> + { + final IntHashSet consumerTopicPartitions = new IntHashSet(); a.item(ua -> ua.consumerId(u.consumerId()).partitions(p -> u.partitions() - .forEach(np -> p.item(tp -> tp.partitionId(np.partitionId())))))))) + .forEach(np -> + { + consumerTopicPartitions.add(np.partitionId()); + p.item(tp -> tp.partitionId(np.partitionId())); + }))); + topicConsumers.add(new TopicPartition(u.consumerId().asString(), ta.topic().asString(), + consumerTopicPartitions)); + }))) .build() .sizeof())); + + assignments.put(ta.topic().asString(), new TopicConsumer(partitions, topicConsumers)); }); } @@ -921,6 +1014,8 @@ private void doPartitionAssignment( if (memberId.equals(leaderId)) { int memberSize = members.size(); + consumers.clear(); + partitionsByTopic.forEach((t, p) -> { final int partitionSize = p.size(); @@ -933,13 +1028,14 @@ private void doPartitionAssignment( for (String member : members.keySet()) { String consumerId = members.get(member); - List topicPartitions = assignment.computeIfAbsent( + List topicPartitions = consumers.computeIfAbsent( member, tp -> new ArrayList<>()); - List partitions = new ArrayList<>(); + IntHashSet partitions = new IntHashSet(); + IntHashSet.IntIterator iterator = p.iterator(); for (; partitionIndex < newPartitionPerTopic; partitionIndex++) { - partitions.add(p.iterator().next()); + partitions.add(iterator.next()); } topicPartitions.add(new TopicPartition(consumerId, t, partitions)); @@ -955,17 +1051,17 @@ private void doMemberAssigment( long traceId, long authorization) { - if (!assignment.isEmpty()) + if (!consumers.isEmpty()) { Array32FW assignment = memberAssignmentRW .wrap(writeBuffer, DataFW.FIELD_OFFSET_PAYLOAD, writeBuffer.capacity()) - .item(ma -> this.assignment.forEach((k, v) -> + .item(ma -> this.consumers.forEach((k, v) -> ma.memberId(k) .assignments(ta -> v.forEach(tp -> 
ta.item(i -> i.topic(tp.topic) .partitions(p -> tp.partitions.forEach(t -> p.item(tpa -> tpa.partitionId(t)))) .userdata(u -> - this.assignment.forEach((ak, av) -> + this.consumers.forEach((ak, av) -> av.stream().filter(atp -> atp.topic.equals(tp.topic)).forEach(at -> u.item(ud -> ud .consumerId(at.consumerId) @@ -1095,9 +1191,7 @@ private void onConsumerInitialBegin( assert initialAck <= initialSeq; - fanout.streams.add(this); - - fanout.doConsumerInitialBegin(traceId); + fanout.onConsumerFanoutStreamOpening(traceId, this); } private void onConsumerInitialData( @@ -1197,13 +1291,12 @@ private void doConsumerInitialWindow( } private void doConsumerReplyBegin( - long traceId, - OctetsFW extension) + long traceId) { state = KafkaState.openingReply(state); doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, affinity, extension); + traceId, authorization, affinity, EMPTY_OCTETS); } private void doConsumerReplyData( @@ -1287,12 +1380,18 @@ private void onConsumerReplyWindow( replyBud = budgetId; replyPad = padding; replyCap = capabilities; - state = KafkaState.openedReply(state); assert replyAck <= replySeq; fanout.replyMax = replyMax; fanout.doConsumerReplyWindow(traceId, authorizationId, budgetId, padding); + + if (!KafkaState.replyOpened(state)) + { + state = KafkaState.openedReply(state); + + fanout.onConsumerFanoutMemberOpened(traceId, this); + } } private void cleanup( @@ -1307,16 +1406,30 @@ final class TopicPartition { private final String consumerId; private final String topic; - private final List partitions; + private final IntHashSet partitions; TopicPartition( String consumerId, String topic, - List partitions) + IntHashSet partitions) { this.consumerId = consumerId; this.topic = topic; this.partitions = partitions; } } + + final class TopicConsumer + { + private final IntHashSet partitions; + private final List consumers; + + TopicConsumer( + IntHashSet partitions, + List consumers) + { + this.partitions = 
partitions; + this.consumers = consumers; + } + } } diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index d1693edccc..5d44a6ebe4 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -84,6 +84,7 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFlushExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupBeginExFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupFlushExFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupMemberFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupMemberMetadataFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW; @@ -143,6 +144,7 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final SignalFW signalRO = new SignalFW(); private final ExtensionFW extensionRO = new ExtensionFW(); private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW(); + private final KafkaFlushExFW kafkaFlushExRO = new KafkaFlushExFW(); private final BeginFW.Builder beginRW = new BeginFW.Builder(); private final DataFW.Builder dataRW = new DataFW.Builder(); @@ -1378,9 +1380,50 @@ private void onApplicationEnd( private void onApplicationFlush( FlushFW flush) { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); final long traceId = flush.traceId(); + final long authorizationId = flush.authorization(); + 
final int reserved = flush.reserved(); + final OctetsFW extension = flush.extension(); - coordinatorClient.doHeartbeat(traceId); + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence + reserved; + initialAck = initialSeq; + + if (extension.sizeof() > 0) + { + final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()); + final KafkaFlushExFW kafkaFlushEx = beginEx != null && beginEx.typeId() == kafkaTypeId ? + kafkaFlushExRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()) : null; + + assert kafkaFlushEx.kind() == KafkaBeginExFW.KIND_GROUP; + final KafkaGroupFlushExFW kafkaGroupFlushEx = kafkaFlushEx.group(); + + Array32FW members = kafkaGroupFlushEx.members(); + + assert members.fieldCount() == 1; + + members.forEach(m -> + { + OctetsFW metadata = m.metadata(); + final int metadataSize = m.metadataLen(); + + if (metadataSize > 0) + { + metadataBuffer.putBytes(0, metadata.value(), 0, metadataSize); + topicMetadataLimit = metadataSize; + } + }); + + coordinatorClient.doJoinGroupRequest(traceId); + } + else + { + coordinatorClient.doHeartbeat(traceId); + } } private void onApplicationAbort( @@ -3683,6 +3726,18 @@ private void doSyncRequest( doEncodeSyncGroupRequest(traceId, budgetId); } + private void doJoinGroupRequest( + long traceId) + { + if (heartbeatRequestId != NO_CANCEL_ID) + { + signaler.cancel(heartbeatRequestId); + heartbeatRequestId = NO_CANCEL_ID; + } + + doEncodeJoinGroupRequest(traceId, 0); + } + private void doHeartbeat( long traceId) { diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java index 20835f64ff..c282989190 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java +++ 
b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java @@ -62,4 +62,16 @@ public void shouldAssignPartition() throws Exception k3po.finish(); } + @Test + @Configuration("cache.yaml") + @Specification({ + "${app}/reassign.new.topic/client", + "${net}/reassign.new.topic/server" + }) + @ScriptProperty("serverAddress \"zilla://streams/app1\"") + public void shouldReassignOnUpdatedTopic() throws Exception + { + k3po.finish(); + } + } diff --git a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java index da9f248554..c62c221290 100644 --- a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java +++ b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java @@ -25,6 +25,7 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.function.Predicate; + import org.agrona.DirectBuffer; import org.agrona.MutableDirectBuffer; import org.agrona.concurrent.UnsafeBuffer; @@ -59,6 +60,7 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.OctetsFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.String16FW; import io.aklivity.zilla.specs.binding.kafka.internal.types.String8FW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.rebalance.ConsumerAssignmentFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.rebalance.MemberAssignmentFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.rebalance.TopicAssignmentFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaApi; @@ -78,6 +80,7 @@ import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupMemberFW; import 
io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupMemberMetadataFW; +import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaGroupTopicMetadataFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedBeginExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedConsumerFlushExFW; import io.aklivity.zilla.specs.binding.kafka.internal.types.stream.KafkaMergedDataExFW; @@ -290,15 +293,15 @@ public static KafkaGroupMemberMetadataBuilder memberMetadata() } @Function - public static MemberAssignmentsBuilder memberAssignment() + public static KafkaMemberAssignmentsBuilder memberAssignment() { - return new MemberAssignmentsBuilder(); + return new KafkaMemberAssignmentsBuilder(); } @Function - public static TopicAssignmentsBuilder topicAssignment() + public static KafkaTopicAssignmentsBuilder topicAssignment() { - return new TopicAssignmentsBuilder(); + return new KafkaTopicAssignmentsBuilder(); } public abstract static class KafkaHeadersBuilder @@ -583,7 +586,6 @@ private void set( public static final class KafkaGroupMemberMetadataBuilder { private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); - private final KafkaGroupMemberMetadataFW.Builder groupMemberMetadataRW = new KafkaGroupMemberMetadataFW.Builder(); @@ -599,14 +601,11 @@ public KafkaGroupMemberMetadataBuilder consumerId( return this; } - public KafkaGroupMemberMetadataBuilder topic( - String topic, - int partitionId) + public KafkaTopicsBuilder topic( + String topic) { - groupMemberMetadataRW.topics(t -> - t.item(tp -> tp.topic(topic) - .partitions(p -> p.item(i -> i.partitionId(partitionId))))); - return this; + KafkaTopicsBuilder topicsBuilder = new KafkaTopicsBuilder(topic); + return topicsBuilder; } public byte[] build() @@ -616,76 +615,188 @@ public byte[] build() metadata.buffer().getBytes(metadata.offset(), array); return array; } + + class KafkaTopicsBuilder + { + private final 
MutableDirectBuffer topicBuffer = new UnsafeBuffer(new byte[1024 * 8]); + private final KafkaGroupTopicMetadataFW.Builder topicsRW = new KafkaGroupTopicMetadataFW.Builder(); + + KafkaTopicsBuilder( + String topic) + { + topicsRW.wrap(topicBuffer, 0, topicBuffer.capacity()); + topicsRW.topic(topic); + } + + public KafkaTopicsBuilder partitionId( + int partitionId) + { + topicsRW.partitionsItem(i -> i.partitionId(partitionId)); + return this; + } + + public KafkaGroupMemberMetadataBuilder build() + { + KafkaGroupTopicMetadataFW topic = topicsRW.build(); + groupMemberMetadataRW.topicsItem(i -> i.topic(topic.topic()).partitions(topic.partitions())); + + return KafkaGroupMemberMetadataBuilder.this; + } + } } - public static final class MemberAssignmentsBuilder + public static final class KafkaMemberAssignmentsBuilder { private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); - private final Array32FW.Builder memberAssignments = + private final Array32FW.Builder memberAssignmentsRW = new Array32FW.Builder(new MemberAssignmentFW.Builder(), new MemberAssignmentFW()); - public MemberAssignmentsBuilder() + public KafkaMemberAssignmentsBuilder() { - memberAssignments.wrap(writeBuffer, 0, writeBuffer.capacity()); + memberAssignmentsRW.wrap(writeBuffer, 0, writeBuffer.capacity()); } - public MemberAssignmentsBuilder member( - String memberId, - String topic, - int partitionId, - String consumerId, - int consumerPartitionId) + public KafkaMemberBuilder member( + String memberId) { - memberAssignments.item(ma -> - ma.memberId(memberId) - .assignments(ta -> ta.item(i -> - i.topic(topic) - .partitions(p -> p.item(tpa -> tpa.partitionId(partitionId))) - .userdata(u -> - u.item(ud -> ud - .consumerId(consumerId) - .partitions(pt -> pt.item(pi -> pi.partitionId(consumerPartitionId))))) - ))); - return this; + KafkaMemberBuilder member = new KafkaMemberBuilder(memberId); + return member; } public byte[] build() { - Array32FW members = 
memberAssignments.build(); + Array32FW members = memberAssignmentsRW.build(); final byte[] array = new byte[members.sizeof()]; members.buffer().getBytes(members.offset(), array); return array; } + + class KafkaMemberBuilder + { + private final MutableDirectBuffer memberBuffer = new UnsafeBuffer(new byte[1024 * 8]); + private final MemberAssignmentFW.Builder assignmentRW = new MemberAssignmentFW.Builder(); + private final MutableDirectBuffer topicAssignmentBuffer = new UnsafeBuffer(new byte[1024 * 8]); + private final Array32FW.Builder topicAssignmentsRW = + new Array32FW.Builder(new TopicAssignmentFW.Builder(), new TopicAssignmentFW()); + + KafkaMemberBuilder( + String memberId) + { + assignmentRW.wrap(memberBuffer, 0, memberBuffer.capacity()) + .memberId(memberId); + topicAssignmentsRW.wrap(topicAssignmentBuffer, 0, topicAssignmentBuffer.capacity()); + } + + public KafkaTopicAssignmentBuilder assignment() + { + KafkaTopicAssignmentBuilder assignment = new KafkaTopicAssignmentBuilder(); + return assignment; + } + + public KafkaMemberAssignmentsBuilder build() + { + Array32FW topicAssignments = topicAssignmentsRW.build(); + assignmentRW.assignments(topicAssignments); + MemberAssignmentFW assignment = assignmentRW.build(); + memberAssignmentsRW.item(m -> m.set(assignment)); + + return KafkaMemberAssignmentsBuilder.this; + } + + class KafkaTopicAssignmentBuilder + { + private final MutableDirectBuffer assignmentBuffer = new UnsafeBuffer(new byte[1024 * 8]); + TopicAssignmentFW.Builder assignmentRW = new TopicAssignmentFW.Builder(); + + KafkaTopicAssignmentBuilder() + { + assignmentRW.wrap(assignmentBuffer, 0, assignmentBuffer.capacity()); + } + + public KafkaTopicAssignmentBuilder topic( + String topic) + { + assignmentRW.topic(topic); + return this; + } + + public KafkaTopicAssignmentBuilder partitionId( + int partitionId) + { + assignmentRW.partitionsItem(p -> p.partitionId(partitionId)); + return this; + } + + public KafkaConsumerBuilder consumer() + { + 
KafkaConsumerBuilder consumerBuilder = new KafkaConsumerBuilder(); + return consumerBuilder; + } + + public KafkaMemberBuilder build() + { + TopicAssignmentFW assignment = assignmentRW.build(); + topicAssignmentsRW.item(i -> i + .topic(assignment.topic()) + .partitions(assignment.partitions()) + .userdata(assignment.userdata())); + return KafkaMemberBuilder.this; + } + + class KafkaConsumerBuilder + { + private final MutableDirectBuffer consumerBuffer = new UnsafeBuffer(new byte[1024 * 8]); + private final ConsumerAssignmentFW.Builder consumerRW = new ConsumerAssignmentFW.Builder(); + KafkaConsumerBuilder() + { + consumerRW.wrap(consumerBuffer, 0, consumerBuffer.capacity()); + } + + public KafkaConsumerBuilder id( + String id) + { + consumerRW.consumerId(id); + return this; + } + + public KafkaConsumerBuilder partitionId( + int partitionId) + { + consumerRW.partitionsItem(p -> p.partitionId(partitionId)); + return this; + } + + public KafkaTopicAssignmentBuilder build() + { + ConsumerAssignmentFW consumer = consumerRW.build(); + assignmentRW.userdataItem(u -> u + .consumerId(consumer.consumerId()) + .partitions(consumer.partitions())); + + return KafkaTopicAssignmentBuilder.this; + } + } + } + } } - public static final class TopicAssignmentsBuilder + public static final class KafkaTopicAssignmentsBuilder { private final MutableDirectBuffer writeBuffer = new UnsafeBuffer(new byte[1024 * 8]); private final Array32FW.Builder topicAssignments = new Array32FW.Builder(new TopicAssignmentFW.Builder(), new TopicAssignmentFW()); - public TopicAssignmentsBuilder() + public KafkaTopicAssignmentsBuilder() { topicAssignments.wrap(writeBuffer, 0, writeBuffer.capacity()); } - public TopicAssignmentsBuilder topic( - String topic, - int partitionId, - String consumerId, - int consumerPartitionId) - { - topicAssignments.item(i -> - i.topic(topic) - .partitions(p -> p.item(tpa -> tpa.partitionId(partitionId))) - .userdata(u -> - u.item(ud -> ud - .consumerId(consumerId) - 
.partitions(pt -> pt.item(pi -> pi.partitionId(consumerPartitionId))))) - ); - return this; + public KafkaTopicBuilder topic() + { + KafkaTopicBuilder kafkaTopicBuilder = new KafkaTopicBuilder(); + return kafkaTopicBuilder; } public byte[] build() @@ -695,6 +806,80 @@ public byte[] build() topics.buffer().getBytes(topics.offset(), array); return array; } + + class KafkaTopicBuilder + { + private final MutableDirectBuffer topicBuffer = new UnsafeBuffer(new byte[1024 * 8]); + private final TopicAssignmentFW.Builder topicAssignmentRW = new TopicAssignmentFW.Builder(); + KafkaTopicBuilder() + { + topicAssignmentRW.wrap(topicBuffer, 0, topicBuffer.capacity()); + } + + public KafkaTopicBuilder id( + String topic) + { + topicAssignmentRW.topic(topic); + return this; + } + + public KafkaTopicBuilder partitionId( + int partitionId) + { + topicAssignmentRW.partitionsItem(p -> p.partitionId(partitionId)); + return this; + } + + public KafkaConsumerBuilder consumer() + { + KafkaConsumerBuilder consumerBuilder = new KafkaConsumerBuilder(); + return consumerBuilder; + } + + public KafkaTopicAssignmentsBuilder build() + { + TopicAssignmentFW topicAssignment = topicAssignmentRW.build(); + topicAssignments.item(i -> i + .topic(topicAssignment.topic()) + .partitions(topicAssignment.partitions()) + .userdata(topicAssignment.userdata())); + return KafkaTopicAssignmentsBuilder.this; + } + + class KafkaConsumerBuilder + { + private final MutableDirectBuffer consumerBuffer = new UnsafeBuffer(new byte[1024 * 8]); + private final ConsumerAssignmentFW.Builder consumerRW = new ConsumerAssignmentFW.Builder(); + KafkaConsumerBuilder() + { + consumerRW.wrap(consumerBuffer, 0, consumerBuffer.capacity()); + } + + public KafkaConsumerBuilder id( + String id) + { + consumerRW.consumerId(id); + return this; + } + + public KafkaConsumerBuilder partitionId( + int partitionId) + { + consumerRW.partitionsItem(p -> p.partitionId(partitionId)); + return this; + } + + public KafkaTopicBuilder build() + { + 
ConsumerAssignmentFW consumer = consumerRW.build(); + topicAssignmentRW.userdataItem(u -> u + .consumerId(consumer.consumerId()) + .partitions(consumer.partitions())); + + return KafkaTopicBuilder.this; + } + } + } } public static final class KafkaBeginExBuilder @@ -1933,47 +2118,65 @@ public final class KafkaConsumerDataExBuilder { private final KafkaConsumerDataExFW.Builder consumerDataExRW = new KafkaConsumerDataExFW.Builder(); - private final MutableDirectBuffer partitionBuffer = new UnsafeBuffer(new byte[1024 * 8]); - private final MutableDirectBuffer assignmentBuffer = new UnsafeBuffer(new byte[1024 * 8]); - private final Array32FW.Builder partitionsRW = - new Array32FW.Builder<>(new KafkaTopicPartitionFW.Builder(), new KafkaTopicPartitionFW()); - - private final Array32FW.Builder assignmentsRW = - new Array32FW.Builder<>(new KafkaConsumerAssignmentFW.Builder(), new KafkaConsumerAssignmentFW()); - private KafkaConsumerDataExBuilder() { consumerDataExRW.wrap(writeBuffer, KafkaDataExFW.FIELD_OFFSET_CONSUMER, writeBuffer.capacity()); - partitionsRW.wrap(partitionBuffer, 0, partitionBuffer.capacity()); - assignmentsRW.wrap(assignmentBuffer, 0, assignmentBuffer.capacity()); } public KafkaConsumerDataExBuilder partition( int partitionId) { - partitionsRW.item(i -> i.partitionId(partitionId)); + consumerDataExRW.partitionsItem(p -> p.partitionId(partitionId)); return this; } - public KafkaConsumerDataExBuilder assignment( - String consumerId, - int partitionId) + public KafkaConsumerBuilder consumer() { - assignmentsRW.item(i -> i - .consumerId(consumerId) - .partitions(p -> p.item(tp -> tp.partitionId(partitionId)))); - - return this; + KafkaConsumerBuilder kafkaConsumerBuilder = new KafkaConsumerBuilder(); + return kafkaConsumerBuilder; } public KafkaDataExBuilder build() { - consumerDataExRW.partitions(partitionsRW.build()); - consumerDataExRW.assignments(assignmentsRW.build()); final KafkaConsumerDataExFW consumerDataEx = consumerDataExRW.build(); 
dataExRO.wrap(writeBuffer, 0, consumerDataEx.limit()); return KafkaDataExBuilder.this; } + + class KafkaConsumerBuilder + { + private final MutableDirectBuffer consumerBuffer = new UnsafeBuffer(new byte[1024 * 8]); + private final KafkaConsumerAssignmentFW.Builder consumerRW = new KafkaConsumerAssignmentFW.Builder(); + + KafkaConsumerBuilder() + { + consumerRW.wrap(consumerBuffer, 0, consumerBuffer.capacity()); + } + + public KafkaConsumerBuilder id( + String id) + { + consumerRW.consumerId(id); + return this; + } + + public KafkaConsumerBuilder partition( + int partitionId) + { + consumerRW.partitionsItem(p -> p.partitionId(partitionId)); + return this; + } + + public KafkaConsumerDataExBuilder build() + { + KafkaConsumerAssignmentFW consumer = consumerRW.build(); + consumerDataExRW.assignmentsItem(a -> a + .consumerId(consumer.consumerId()) + .partitions(consumer.partitions())); + + return KafkaConsumerDataExBuilder.this; + } + } } public final class KafkaOffsetFetchDataExBuilder diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt index 900ec59160..80bfb3dcb8 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt @@ -35,6 +35,9 @@ read zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .consumer() .partition(0) - .assignment("localhost:9092", 0) + .consumer() + .id("localhost:9092") + .partition(0) + .build() .build() .build()} diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt index 17e8dc42c5..5a96250682 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt @@ -40,7 +40,10 @@ write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .consumer() .partition(0) - .assignment("localhost:9092", 0) + .consumer() + .id("localhost:9092") + .partition(0) + .build() .build() .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/client.rpt new file mode 100644 index 0000000000..117d882635 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/client.rpt @@ -0,0 +1,83 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .consumer() + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .topic("test-1") + .partition(0) + .partition(1) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .consumer() + .partition(0) + .partition(1) + .consumer() + .id("localhost:9092") + .partition(0) + .partition(1) + .build() + .build() + .build()} + +read notify RECEIVED_ASSIGNMENT + +connect await RECEIVED_ASSIGNMENT + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .consumer() + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .topic("test-2") + .partition(0) + .partition(1) + .partition(2) + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .consumer() + .partition(0) + .partition(1) + .partition(2) + .consumer() + .id("localhost:9092") + .partition(0) + .partition(1) + .partition(2) + .build() + .build() + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/server.rpt new file mode 100644 index 0000000000..f3a3526f2f --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/server.rpt @@ -0,0 +1,89 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .consumer() + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .topic("test-1") + .partition(0) + .partition(1) + .build() + .build()} + +connected + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .consumer() + .partition(0) + .partition(1) + .consumer() + .id("localhost:9092") + .partition(0) + .partition(1) + .build() + .build() + .build()} +write zilla:data.empty +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .consumer() + .groupId("client-1") + .consumerId("localhost:9092") + .timeout(45000) + .topic("test-2") + .partition(0) + .partition(1) + .partition(2) + .build() + .build()} + +connected + + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .consumer() + .partition(0) + .partition(1) + .partition(2) + .consumer() + .id("localhost:9092") + .partition(0) + .partition(1) + .partition(2) + .build() + .build() + .build()} + +write zilla:data.empty +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt index 282b24d83b..20d1a8e7ef 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt @@ -26,7 +26,9 @@ write zilla:begin.ext ${kafka:beginEx() .timeout(45000) .metadata(kafka:memberMetadata() .consumerId("localhost:9092") - .topic("test", 0) + .topic("test") + .partitionId(0) + .build() .build()) .build() .build()} @@ -36,7 +38,7 @@ connected read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .group() - .groupId("test") + .groupId("client-1") .protocol("highlander") .timeout(30000) .build() @@ -49,16 +51,34 @@ read advised zilla:flush ${kafka:flushEx() .memberId("memberId-1") .members("memberId-1", kafka:memberMetadata() .consumerId("localhost:9092") - .topic("test", 0) + .topic("test") + .partitionId(0) + .build() .build()) .build() .build()} write ${kafka:memberAssignment() - .member("memberId-1", "test", 0, "localhost:9092", 0) - .build()} + .member("memberId-1") + .assignment() + .topic("test") + .partitionId(0) + .consumer() + .id("localhost:9092") + .partitionId(0) + .build() + .build() + .build() + .build()} write flush read ${kafka:topicAssignment() - .topic("test", 0, "localhost:9092", 0) - .build()} + .topic() + .id("test") + .partitionId(0) + .consumer() + .id("localhost:9092") + .partitionId(0) + .build() + .build() + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt index 8367f6fc54..c462ee5cf4 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt @@ -30,7 +30,9 @@ read zilla:begin.ext ${kafka:matchBeginEx() .timeout(45000) .metadata(kafka:memberMetadata() .consumerId("localhost:9092") - .topic("test", 0) + .topic("test") + .partitionId(0) + .build() .build()) .build() .build()} @@ -40,7 +42,7 @@ connected write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .group() - .groupId("test") + .groupId("client-1") .protocol("highlander") .timeout(30000) .build() @@ -54,16 +56,34 @@ write advise zilla:flush ${kafka:flushEx() .memberId("memberId-1") .members("memberId-1", kafka:memberMetadata() .consumerId("localhost:9092") - .topic("test", 0) + .topic("test") + .partitionId(0) + .build() .build()) .build() .build()} read ${kafka:memberAssignment() - .member("memberId-1", "test", 0, "localhost:9092", 0) - .build()} + .member("memberId-1") + .assignment() + .topic("test") + .partitionId(0) + .consumer() + .id("localhost:9092") + .partitionId(0) + .build() + .build() + .build() + .build()} write ${kafka:topicAssignment() - .topic("test", 0, "localhost:9092", 0) + .topic() + .id("test") + .partitionId(0) + .consumer() + .id("localhost:9092") + .partitionId(0) + .build() + .build() .build()} write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/client.rpt new file mode 100644 index 0000000000..afca9b834f --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/client.rpt @@ -0,0 +1,183 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(45000) + .metadata(kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test-1") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(30000) + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test-1") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +write ${kafka:memberAssignment() + .member("memberId-1") + .assignment() + .topic("test-1") + .partitionId(0) + .partitionId(1) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .build() + .build() + .build() + .build()} +write flush + +read ${kafka:topicAssignment() + .topic() + .id("test-1") + .partitionId(0) + .partitionId(1) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .build() + .build() + .build()} + +write advise 
zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test-1") + .partitionId(0) + .partitionId(1) + .build() + .topic("test-2") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .build() + .build()) + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test-1") + .partitionId(0) + .partitionId(1) + .build() + .topic("test-2") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .build() + .build()) + .build() + .build()} + +write ${kafka:memberAssignment() + .member("memberId-1") + .assignment() + .topic("test-1") + .partitionId(0) + .partitionId(1) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .build() + .build() + .assignment() + .topic("test-2") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .build() + .build() + .build() + .build()} +write flush + +read ${kafka:topicAssignment() + .topic() + .id("test-1") + .partitionId(0) + .partitionId(1) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .build() + .build() + .topic() + .id("test-2") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .build() + .build() + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/server.rpt new file mode 100644 index 0000000000..1a5127dea3 --- 
/dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/server.rpt @@ -0,0 +1,188 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(45000) + .metadata(kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test-1") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test-1") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +read ${kafka:memberAssignment() + .member("memberId-1") + .assignment() + .topic("test-1") + .partitionId(0) + .partitionId(1) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + 
.build() + .build() + .build() + .build()} + +write ${kafka:topicAssignment() + .topic() + .id("test-1") + .partitionId(0) + .partitionId(1) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .build() + .build() + .build()} +write flush + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test-1") + .partitionId(0) + .partitionId(1) + .build() + .topic("test-2") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .build() + .build()) + .build() + .build()} + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test-1") + .partitionId(0) + .partitionId(1) + .build() + .topic("test-2") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .build() + .build()) + .build() + .build()} + +read ${kafka:memberAssignment() + .member("memberId-1") + .assignment() + .topic("test-1") + .partitionId(0) + .partitionId(1) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .build() + .build() + .assignment() + .topic("test-2") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .build() + .build() + .build() + .build()} + +write ${kafka:topicAssignment() + .topic() + .id("test-1") + .partitionId(0) + .partitionId(1) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .build() + .build() + .topic() + .id("test-2") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .build() + .build() + .build()} +write flush diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt index 1665ea0fac..9e5032849e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt @@ -118,13 +118,67 @@ write zilla:begin.ext ${kafka:beginEx() .groupId("client-1") .protocol("highlander") .timeout(45000) + .metadata(kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) .build() .build()} connected +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(30000) + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +write ${kafka:memberAssignment() + .member("memberId-1") + .assignment() + .topic("test") + .partitionId(0) + .partitionId(1) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .build() + .build() + .build() + .build()} +write flush + read ${kafka:topicAssignment() - .topic("test", 0, "localhost:9092", 0) + .topic() + .id("test") + .partitionId(0) + .consumer() + .id("localhost:9092") + .partitionId(0) + .build() + .build() .build()} read notify RECEIVED_CONSUMER diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt index 179148d13d..77cc796546 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt @@ -117,14 +117,68 @@ read zilla:begin.ext ${kafka:matchBeginEx() .groupId("client-1") .protocol("highlander") .timeout(45000) + .metadata(kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) .build() .build()} connected +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("localhost:9092") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +read ${kafka:memberAssignment() + .member("memberId-1") + .assignment() + .topic("test") + .partitionId(0) + .partitionId(1) + .consumer() + .id("localhost:9092") + .partitionId(0) + .partitionId(1) + .build() + .build() + .build() + .build()} + write ${kafka:topicAssignment() - .topic("test", 0, "localhost:9092", 0) - .build()} + .topic() + .id("test") + .partitionId(0) + .consumer() + .id("localhost:9092") + .partitionId(0) + .build() + .build() + .build()} write flush accepted diff --git 
a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java index e81ca95da0..59e9c98f2d 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java @@ -105,23 +105,41 @@ public void setUp() throws Exception public void shouldGenerateMemberMetadata() { byte[] build = KafkaFunctions.memberMetadata() - .consumerId("localhost:9092") - .topic("test", 0) - .build(); + .consumerId("localhost:9092") + .topic("test-1") + .partitionId(0) + .partitionId(1) + .build() + .topic("test-2") + .partitionId(0) + .partitionId(1) + .partitionId(2) + .build() + .build(); DirectBuffer buffer = new UnsafeBuffer(build); KafkaGroupMemberMetadataFW memberMetadata = new KafkaGroupMemberMetadataFW().wrap(buffer, 0, buffer.capacity()); assertEquals("localhost:9092", memberMetadata.consumerId().asString()); + assertEquals(2, memberMetadata.topics().fieldCount()); } @Test public void shouldGenerateMemberAssignment() { byte[] build = KafkaFunctions.memberAssignment() - .member("memberId-1", "test", 0, "localhost:9092", 0) - .build(); + .member("memberId-1") + .assignment() + .topic("test") + .partitionId(0) + .consumer() + .id("localhost:9092") + .partitionId(0) + .build() + .build() + .build() + .build(); DirectBuffer buffer = new UnsafeBuffer(build); Array32FW assignments = @@ -137,7 +155,14 @@ public void shouldGenerateMemberAssignment() public void shouldGenerateTopicAssignment() { byte[] build = KafkaFunctions.topicAssignment() - .topic("test", 0, "localhost:9092", 0) + .topic() + .id("test") + .partitionId(0) + .consumer() + .id("localhost:9092") + .partitionId(0) + .build() + .build() .build(); DirectBuffer buffer = new UnsafeBuffer(build); @@ -4155,7 
+4180,10 @@ public void shouldGenerateConsumerDataExtension() .typeId(0x03) .consumer() .partition(0) - .assignment("localhost:9092", 0) + .consumer() + .id("localhost:9092") + .partition(0) + .build() .build() .build(); diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ConsumerIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ConsumerIT.java index 8cf378a166..2f0b411930 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ConsumerIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/ConsumerIT.java @@ -44,4 +44,13 @@ public void shouldAssignPartition() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${app}/reassign.new.topic/client", + "${app}/reassign.new.topic/server"}) + public void shouldReassignOnNewTopic() throws Exception + { + k3po.finish(); + } } diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java index 416825c4db..6955a66e91 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java @@ -107,4 +107,13 @@ public void shouldAssignGroupPartition() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${app}/reassign.new.topic/client", + "${app}/reassign.new.topic/server"}) + public void shouldReassignOnNewTopic() throws Exception + { + k3po.finish(); + } } From 082bee753eb0bf456263117b95195c41ccba88e7 Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Wed, 13 Sep 2023 17:26:23 -0700 Subject: [PATCH 083/115] Add test to validate merge produce 
rejection on wrong partition (#410) --- .../KafkaCacheClientConsumerFactory.java | 29 ++- .../KafkaCacheServerConsumerFactory.java | 14 +- .../kafka/internal/stream/CacheMergedIT.java | 10 + .../consumer/partition.assignment/client.rpt | 4 +- .../consumer/partition.assignment/server.rpt | 4 +- .../consumer/reassign.new.topic/client.rpt | 8 +- .../consumer/reassign.new.topic/server.rpt | 8 +- .../group/partition.assignment/client.rpt | 8 +- .../group/partition.assignment/server.rpt | 8 +- .../group/reassign.new.topic/client.rpt | 20 +- .../group/reassign.new.topic/server.rpt | 20 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 50 ++++ .../server.rpt | 54 +++++ .../client.rpt | 8 +- .../server.rpt | 8 +- .../client.rpt | 222 ++++++++++++++++++ .../server.rpt | 207 ++++++++++++++++ .../kafka/streams/application/MergedIT.java | 18 ++ 20 files changed, 649 insertions(+), 55 deletions(-) create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.invalid.partition/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.invalid.partition/server.rpt diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java index 67c5f7b8b6..4ade212901 100644 --- 
a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java @@ -160,8 +160,8 @@ public MessageConsumer newStream( if (fan == null) { KafkaCacheClientConsumerFan newFan = - new KafkaCacheClientConsumerFan(routedId, resolvedId, authorization, groupId, - topic, consumerId, partitions, timeout); + new KafkaCacheClientConsumerFan(routedId, resolvedId, authorization, fanKey, + groupId, topic, consumerId, partitions, timeout); fan = newFan; clientConsumerFansByConsumer.put(fanKey, fan); } @@ -386,6 +386,7 @@ final class KafkaCacheClientConsumerFan private final long originId; private final long routedId; private final long authorization; + private final String fanKey; private final String groupId; private final String topic; private final String consumerId; @@ -414,6 +415,7 @@ private KafkaCacheClientConsumerFan( long originId, long routedId, long authorization, + String fanKey, String groupId, String topic, String consumerId, @@ -428,6 +430,7 @@ private KafkaCacheClientConsumerFan( this.consumerId = consumerId; this.partitions = partitions; this.timeout = timeout; + this.fanKey = fanKey; this.members = new ArrayList<>(); this.assignedPartitions = new IntHashSet(); this.assignments = new Object2ObjectHashMap<>(); @@ -483,11 +486,23 @@ private void onConsumerFanMemberClosed( if (members.isEmpty()) { - doConsumerFanInitialEndIfNecessary(traceId); - doConsumerFanReplyResetIfNecessary(traceId); + cleanup(traceId); } } + private void cleanup(long traceId) + { + doConsumerFanInitialEndIfNecessary(traceId); + doConsumerFanReplyResetIfNecessary(traceId); + } + + private void onConsumerFanClosed( + long traceId) + { + clientConsumerFansByConsumer.remove(this.fanKey); + cleanup(traceId); + } + private void doConsumerFanInitialBeginIfNecessary( long traceId) { @@ -574,6 +589,8 @@ 
private void onConsumerFanInitialReset( doConsumerFanReplyResetIfNecessary(traceId); members.forEach(s -> s.doConsumerInitialResetIfNecessary(traceId)); + + onConsumerFanClosed(traceId); } private void onConsumerFanInitialWindow( @@ -693,6 +710,8 @@ private void onConsumerFanReplyEnd( doConsumerFanInitialEndIfNecessary(traceId); members.forEach(s -> s.doConsumerReplyEndIfNecessary(traceId)); + + onConsumerFanClosed(traceId); } private void onConsumerFanReplyAbort( @@ -705,6 +724,8 @@ private void onConsumerFanReplyAbort( doConsumerFanInitialAbortIfNecessary(traceId); members.forEach(s -> s.doConsumerReplyAbortIfNecessary(traceId)); + + onConsumerFanClosed(traceId); } private void doConsumerFanReplyResetIfNecessary( diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java index f658b13867..5366a193bf 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java @@ -748,9 +748,11 @@ private void onConsumerInitialReset( assert this.initialAck <= this.initialSeq; - streams.forEach(m -> m.doConsumerInitialReset(traceId)); + streams.forEach(m -> m.cleanup(traceId)); doConsumerReplyReset(traceId); + + onConsumerFanClosed(traceId); } @@ -961,6 +963,8 @@ private void onConsumerReplyEnd( streams.forEach(s -> s.doConsumerReplyEnd(traceId)); doConsumerInitialEnd(traceId); + + onConsumerFanClosed(traceId); } private void onConsumerReplyAbort( @@ -981,6 +985,8 @@ private void onConsumerReplyAbort( streams.forEach(s -> s.cleanup(traceId)); doConsumerInitialAbort(traceId); + + onConsumerFanClosed(traceId); } private void doConsumerReplyReset( @@ -1079,6 
+1085,12 @@ private void doMemberAssigment( EMPTY_OCTETS.buffer(), EMPTY_OCTETS.offset(), EMPTY_OCTETS.sizeof(), EMPTY_OCTETS); } } + + private void onConsumerFanClosed( + long traceId) + { + clientConsumerFansByGroupId.remove(this.groupId); + } } final class KafkaCacheServerConsumerStream diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java index ded800e0db..7258a7122e 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java @@ -595,4 +595,14 @@ public void shouldFetchGroupMessageValue() throws Exception { k3po.finish(); } + + @Test + @Configuration("cache.options.merged.yaml") + @Specification({ + "${app}/merged.group.produce.invalid.partition/client", + "${app}/unmerged.group.produce.invalid.partition/server"}) + public void shouldRejectMessageForInvalidPartition() throws Exception + { + k3po.finish(); + } } diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt index 80bfb3dcb8..fb08daee14 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .consumer() .groupId("client-1") - .consumerId("localhost:9092") + .consumerId("consumer-1") 
.timeout(45000) .topic("test") .partition(0) @@ -36,7 +36,7 @@ read zilla:data.ext ${kafka:dataEx() .consumer() .partition(0) .consumer() - .id("localhost:9092") + .id("consumer-1") .partition(0) .build() .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt index 5a96250682..884f25b101 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt @@ -26,7 +26,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .consumer() .groupId("client-1") - .consumerId("localhost:9092") + .consumerId("consumer-1") .timeout(45000) .topic("test") .partition(0) @@ -41,7 +41,7 @@ write zilla:data.ext ${kafka:dataEx() .consumer() .partition(0) .consumer() - .id("localhost:9092") + .id("consumer-1") .partition(0) .build() .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/client.rpt index 117d882635..ceda6ed347 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/client.rpt @@ -22,7 +22,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .consumer() .groupId("client-1") - 
.consumerId("localhost:9092") + .consumerId("consumer-1") .timeout(45000) .topic("test-1") .partition(0) @@ -38,7 +38,7 @@ read zilla:data.ext ${kafka:dataEx() .partition(0) .partition(1) .consumer() - .id("localhost:9092") + .id("consumer-1") .partition(0) .partition(1) .build() @@ -56,7 +56,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .consumer() .groupId("client-1") - .consumerId("localhost:9092") + .consumerId("consumer-1") .timeout(45000) .topic("test-2") .partition(0) @@ -74,7 +74,7 @@ read zilla:data.ext ${kafka:dataEx() .partition(1) .partition(2) .consumer() - .id("localhost:9092") + .id("consumer-1") .partition(0) .partition(1) .partition(2) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/server.rpt index f3a3526f2f..43e2750393 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/reassign.new.topic/server.rpt @@ -26,7 +26,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .consumer() .groupId("client-1") - .consumerId("localhost:9092") + .consumerId("consumer-1") .timeout(45000) .topic("test-1") .partition(0) @@ -43,7 +43,7 @@ write zilla:data.ext ${kafka:dataEx() .partition(0) .partition(1) .consumer() - .id("localhost:9092") + .id("consumer-1") .partition(0) .partition(1) .build() @@ -58,7 +58,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .consumer() .groupId("client-1") - .consumerId("localhost:9092") + .consumerId("consumer-1") .timeout(45000) .topic("test-2") .partition(0) @@ -77,7 +77,7 @@ write zilla:data.ext ${kafka:dataEx() 
.partition(1) .partition(2) .consumer() - .id("localhost:9092") + .id("consumer-1") .partition(0) .partition(1) .partition(2) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt index 20d1a8e7ef..a97e828e3b 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt @@ -25,7 +25,7 @@ write zilla:begin.ext ${kafka:beginEx() .protocol("highlander") .timeout(45000) .metadata(kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test") .partitionId(0) .build() @@ -50,7 +50,7 @@ read advised zilla:flush ${kafka:flushEx() .leaderId("memberId-1") .memberId("memberId-1") .members("memberId-1", kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test") .partitionId(0) .build() @@ -64,7 +64,7 @@ write ${kafka:memberAssignment() .topic("test") .partitionId(0) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .build() .build() @@ -77,7 +77,7 @@ read ${kafka:topicAssignment() .id("test") .partitionId(0) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .build() .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt index c462ee5cf4..2501063a00 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt @@ -29,7 +29,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .protocol("highlander") .timeout(45000) .metadata(kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test") .partitionId(0) .build() @@ -55,7 +55,7 @@ write advise zilla:flush ${kafka:flushEx() .leaderId("memberId-1") .memberId("memberId-1") .members("memberId-1", kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test") .partitionId(0) .build() @@ -69,7 +69,7 @@ read ${kafka:memberAssignment() .topic("test") .partitionId(0) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .build() .build() @@ -81,7 +81,7 @@ write ${kafka:topicAssignment() .id("test") .partitionId(0) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .build() .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/client.rpt index afca9b834f..f1b7fa3a53 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/client.rpt @@ -25,7 +25,7 @@ write zilla:begin.ext ${kafka:beginEx() .protocol("highlander") .timeout(45000) .metadata(kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test-1") .partitionId(0) .partitionId(1) @@ -51,7 +51,7 @@ read advised 
zilla:flush ${kafka:flushEx() .leaderId("memberId-1") .memberId("memberId-1") .members("memberId-1", kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test-1") .partitionId(0) .partitionId(1) @@ -67,7 +67,7 @@ write ${kafka:memberAssignment() .partitionId(0) .partitionId(1) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .build() @@ -82,7 +82,7 @@ read ${kafka:topicAssignment() .partitionId(0) .partitionId(1) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .build() @@ -95,7 +95,7 @@ write advise zilla:flush ${kafka:flushEx() .leaderId("memberId-1") .memberId("memberId-1") .members("memberId-1", kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test-1") .partitionId(0) .partitionId(1) @@ -115,7 +115,7 @@ read advised zilla:flush ${kafka:flushEx() .leaderId("memberId-1") .memberId("memberId-1") .members("memberId-1", kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test-1") .partitionId(0) .partitionId(1) @@ -136,7 +136,7 @@ write ${kafka:memberAssignment() .partitionId(0) .partitionId(1) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .build() @@ -147,7 +147,7 @@ write ${kafka:memberAssignment() .partitionId(1) .partitionId(2) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .partitionId(2) @@ -163,7 +163,7 @@ read ${kafka:topicAssignment() .partitionId(0) .partitionId(1) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .build() @@ -174,7 +174,7 @@ read ${kafka:topicAssignment() .partitionId(1) .partitionId(2) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .partitionId(2) diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/server.rpt index 1a5127dea3..e7843e06a0 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/reassign.new.topic/server.rpt @@ -29,7 +29,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .protocol("highlander") .timeout(45000) .metadata(kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test-1") .partitionId(0) .partitionId(1) @@ -56,7 +56,7 @@ write advise zilla:flush ${kafka:flushEx() .leaderId("memberId-1") .memberId("memberId-1") .members("memberId-1", kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test-1") .partitionId(0) .partitionId(1) @@ -72,7 +72,7 @@ read ${kafka:memberAssignment() .partitionId(0) .partitionId(1) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .build() @@ -86,7 +86,7 @@ write ${kafka:topicAssignment() .partitionId(0) .partitionId(1) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .build() @@ -100,7 +100,7 @@ read advised zilla:flush ${kafka:flushEx() .leaderId("memberId-1") .memberId("memberId-1") .members("memberId-1", kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test-1") .partitionId(0) .partitionId(1) @@ -120,7 +120,7 @@ write advise zilla:flush ${kafka:flushEx() .leaderId("memberId-1") .memberId("memberId-1") .members("memberId-1", kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test-1") .partitionId(0) .partitionId(1) @@ 
-141,7 +141,7 @@ read ${kafka:memberAssignment() .partitionId(0) .partitionId(1) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .build() @@ -152,7 +152,7 @@ read ${kafka:memberAssignment() .partitionId(1) .partitionId(2) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .partitionId(2) @@ -167,7 +167,7 @@ write ${kafka:topicAssignment() .partitionId(0) .partitionId(1) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .build() @@ -178,7 +178,7 @@ write ${kafka:topicAssignment() .partitionId(1) .partitionId(2) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .partitionId(2) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/client.rpt index dc96fdf72f..cb654e6997 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/client.rpt @@ -24,7 +24,7 @@ write zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("test") .groupId("client-1") - .consumerId("localhost:9092") + .consumerId("consumer-1") .timeout(45000) .partition(0, 1) .partition(1, 1) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/server.rpt index cadddb5324..ee3acc2978 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.fetch.message.value/server.rpt @@ -29,7 +29,7 @@ read zilla:begin.ext ${kafka:beginEx() .capabilities("FETCH_ONLY") .topic("test") .groupId("client-1") - .consumerId("localhost:9092") + .consumerId("consumer-1") .timeout(45000) .partition(0, 1) .partition(1, 1) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/client.rpt new file mode 100644 index 0000000000..a37665b86d --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/client.rpt @@ -0,0 +1,50 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .groupId("client-1") + .consumerId("consumer-1") + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(newTimestamp) + .partition(1, 1) + .build() + .build()} +write "Hello, world #A1" +write flush + + +read zilla:reset.ext ${kafka:resetEx() + .typeId(zilla:id("kafka")) + .error(0) + .consumerId("consumer-2") + .build()} + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/server.rpt new file mode 100644 index 0000000000..bc7bde33c7 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/server.rpt @@ -0,0 +1,54 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .groupId("client-1") + .consumerId("consumer-1") + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(newTimestamp) + .partition(1, 1) + .build() + .build()} +read "Hello, world #A1" + +write zilla:reset.ext ${kafka:resetEx() + .typeId(zilla:id("kafka")) + .error(0) + .consumerId("consumer-2") + .build()} + +read abort diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt index 9e5032849e..05d9edd9da 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/client.rpt @@ -119,7 +119,7 @@ write zilla:begin.ext ${kafka:beginEx() .protocol("highlander") .timeout(45000) .metadata(kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test") .partitionId(0) .partitionId(1) @@ -145,7 +145,7 @@ read advised zilla:flush ${kafka:flushEx() .leaderId("memberId-1") .memberId("memberId-1") .members("memberId-1", kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test") .partitionId(0) .partitionId(1) @@ -161,7 +161,7 @@ write ${kafka:memberAssignment() 
.partitionId(0) .partitionId(1) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .build() @@ -175,7 +175,7 @@ read ${kafka:topicAssignment() .id("test") .partitionId(0) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .build() .build() diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt index 77cc796546..180178cfba 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.value/server.rpt @@ -118,7 +118,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .protocol("highlander") .timeout(45000) .metadata(kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test") .partitionId(0) .partitionId(1) @@ -145,7 +145,7 @@ write advise zilla:flush ${kafka:flushEx() .leaderId("memberId-1") .memberId("memberId-1") .members("memberId-1", kafka:memberMetadata() - .consumerId("localhost:9092") + .consumerId("consumer-1") .topic("test") .partitionId(0) .partitionId(1) @@ -161,7 +161,7 @@ read ${kafka:memberAssignment() .partitionId(0) .partitionId(1) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .partitionId(1) .build() @@ -174,7 +174,7 @@ write ${kafka:topicAssignment() .id("test") .partitionId(0) .consumer() - .id("localhost:9092") + .id("consumer-1") .partitionId(0) .build() .build() diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.invalid.partition/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.invalid.partition/client.rpt new file mode 100644 index 0000000000..1acf4ae01b --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.invalid.partition/client.rpt @@ -0,0 +1,222 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} + +read notify RECEIVED_CONFIG + +connect await RECEIVED_CONFIG + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + 
.build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .partition(1, 2) + .build() + .build()} +read notify PARTITION_COUNT_2 + +connect await PARTITION_COUNT_2 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(45000) + .metadata(kafka:memberMetadata() + .consumerId("consumer-1") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(30000) + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("consumer-1") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +write ${kafka:memberAssignment() + .member("memberId-1") + .assignment() + .topic("test") + .partitionId(0) + .partitionId(1) + .consumer() + .id("consumer-1") + .partitionId(0) + .partitionId(1) + .build() + .build() + .build() + .build()} +write flush + +read ${kafka:topicAssignment() + .topic() + .id("test") + .partitionId(0) + .consumer() + .id("consumer-1") + .partitionId(0) + .build() + .consumer() + .id("consumer-2") + .partitionId(1) + .build() + .build() + .build()} + +read notify RECEIVED_CONSUMER + +connect await RECEIVED_CONSUMER + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + option zilla:affinity 1 + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +read zilla:begin.ext 
${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .deferred(10240 - 8192 + padding) + .timestamp(newTimestamp) + .sequence(1) + .ackMode("LEADER_ONLY") + .build() + .build()} +write ${kafka:randomBytes(10240)} +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.invalid.partition/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.invalid.partition/server.rpt new file mode 100644 index 0000000000..d6cb1fba53 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.invalid.partition/server.rpt @@ -0,0 +1,207 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} +property padding 0 + +accept "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + 
.build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .partition(1, 2) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(0) + .metadata(kafka:memberMetadata() + .consumerId("consumer-1") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("consumer-1") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +read ${kafka:memberAssignment() + .member("memberId-1") + .assignment() + .topic("test") + .partitionId(0) + .partitionId(1) + .consumer() + .id("consumer-1") + .partitionId(0) + .partitionId(1) + .build() + .build() + .build() + .build()} + +write ${kafka:topicAssignment() + .topic() + .id("test") + .partitionId(0) + .consumer() + .id("consumer-1") + .partitionId(0) + .build() + .consumer() + .id("consumer-2") + .partitionId(1) + .build() + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} +write flush diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java 
b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java index e019773402..fbc593ab21 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java @@ -665,4 +665,22 @@ public void shouldFetchGroupUnmergedMessage() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${app}/unmerged.group.produce.invalid.partition/client", + "${app}/unmerged.group.produce.invalid.partition/server"}) + public void shouldRejectUnmergedMessageForInvalidPartition() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/merged.group.produce.invalid.partition/client", + "${app}/merged.group.produce.invalid.partition/server"}) + public void shouldRejectMergedMessageForInvalidPartition() throws Exception + { + k3po.finish(); + } } From 365045aa3808f09e58da44c2d2a3dee64dc9351c Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Thu, 14 Sep 2023 22:51:56 -0700 Subject: [PATCH 084/115] Fix consumer assignment causing decoding issue (#414) --- .../KafkaCacheServerConsumerFactory.java | 45 ++-- .../stream/KafkaClientGroupFactory.java | 201 +++++++++++++++--- .../binding-kafka/src/main/zilla/protocol.idl | 20 +- .../consumer/partition.assignment/client.rpt | 7 +- .../consumer/partition.assignment/server.rpt | 7 +- .../group/partition.assignment/client.rpt | 69 ++++-- .../group/partition.assignment/server.rpt | 45 +++- 7 files changed, 308 insertions(+), 86 deletions(-) diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java index 5366a193bf..01541a6904 100644 --- 
a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java @@ -1031,6 +1031,8 @@ private void doPartitionAssignment( int partitionIndex = 0; int newPartitionPerTopic = numberOfPartitionsPerMember + extraPartition; + IntHashSet.IntIterator iterator = p.iterator(); + for (String member : members.keySet()) { String consumerId = members.get(member); @@ -1038,10 +1040,10 @@ private void doPartitionAssignment( member, tp -> new ArrayList<>()); IntHashSet partitions = new IntHashSet(); - IntHashSet.IntIterator iterator = p.iterator(); for (; partitionIndex < newPartitionPerTopic; partitionIndex++) { - partitions.add(iterator.next()); + final int partitionId = iterator.nextValue(); + partitions.add(partitionId); } topicPartitions.add(new TopicPartition(consumerId, t, partitions)); @@ -1059,24 +1061,29 @@ private void doMemberAssigment( { if (!consumers.isEmpty()) { - Array32FW assignment = memberAssignmentRW - .wrap(writeBuffer, DataFW.FIELD_OFFSET_PAYLOAD, writeBuffer.capacity()) - .item(ma -> this.consumers.forEach((k, v) -> - ma.memberId(k) - .assignments(ta -> v.forEach(tp -> ta.item(i -> - i.topic(tp.topic) - .partitions(p -> tp.partitions.forEach(t -> p.item(tpa -> tpa.partitionId(t)))) - .userdata(u -> - this.consumers.forEach((ak, av) -> - av.stream().filter(atp -> atp.topic.equals(tp.topic)).forEach(at -> - u.item(ud -> ud - .consumerId(at.consumerId) - .partitions(pt -> at.partitions.forEach(up -> - pt.item(pi -> pi.partitionId(up)))))))) - ))))) - .build(); + Array32FW.Builder assignmentBuilder = memberAssignmentRW + .wrap(writeBuffer, DataFW.FIELD_OFFSET_PAYLOAD, writeBuffer.capacity()); - doConsumerInitialData(traceId, authorization, initialBud, memberAssignmentRW.sizeof(), 3, + this.consumers.forEach((k, v) -> + { + assignmentBuilder.item(ma -> ma + 
.memberId(k) + .assignments(ta -> v.forEach(tp -> ta.item(i -> i + .topic(tp.topic) + .partitions(p -> tp.partitions.forEach(t -> p.item(tpa -> tpa.partitionId(t)))) + .userdata(u -> + this.consumers.forEach((ak, av) -> av + .stream().filter(atp -> atp.topic.equals(tp.topic)).forEach(at -> + u.item(ud -> ud + .consumerId(at.consumerId) + .partitions(pt -> at.partitions.forEach(up -> + pt.item(pi -> pi.partitionId(up)))))))) + )))); + }); + + Array32FW assignment = assignmentBuilder.build(); + + doConsumerInitialData(traceId, authorization, initialBud, assignment.sizeof(), 3, assignment.buffer(), assignment.offset(), assignment.sizeof(), EMPTY_OCTETS); } else diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index 5d44a6ebe4..a769bf0733 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -55,9 +55,13 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.DescribeConfigsResponseFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.ResourceRequestFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.config.ResourceResponseFW; -import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.consumer.ConsumerMetadataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.consumer.ConsumerAssignmentMetadataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.consumer.ConsumerAssignmentUserdataFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.consumer.ConsumerMetadataTopicFW; -import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.consumer.ConsumerUserdataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.consumer.ConsumerPartitionFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.consumer.ConsumerSubscriptionMetadataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.consumer.ConsumerSubscriptionUserdataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.consumer.ConsumerTopicPartitionFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.AssignmentFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.FindCoordinatorRequestFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.FindCoordinatorResponseFW; @@ -72,8 +76,10 @@ import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.ProtocolMetadataFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.SyncGroupRequestFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.group.SyncGroupResponseFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.ConsumerAssignmentFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.MemberAssignmentFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.TopicAssignmentFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.rebalance.TopicPartitionFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; @@ -171,9 +177,18 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final HeartbeatRequestFW.Builder heartbeatRequestRW = new HeartbeatRequestFW.Builder(); private final LeaveGroupRequestFW.Builder leaveGroupRequestRW = new 
LeaveGroupRequestFW.Builder(); private final LeaveMemberFW.Builder leaveMemberRW = new LeaveMemberFW.Builder(); - private final ConsumerMetadataFW.Builder groupMemberMetadataRW = new ConsumerMetadataFW.Builder(); - private final ConsumerMetadataTopicFW.Builder groupMetadataTopicRW = new ConsumerMetadataTopicFW.Builder(); - private final ConsumerUserdataFW.Builder groupUserdataRW = new ConsumerUserdataFW.Builder(); + private final ConsumerSubscriptionMetadataFW.Builder groupSubscriptionMetadataRW = + new ConsumerSubscriptionMetadataFW.Builder(); + private final ConsumerAssignmentMetadataFW.Builder assignmentMetadataRW = new ConsumerAssignmentMetadataFW.Builder(); + private final ConsumerMetadataTopicFW.Builder metadataTopicRW = new ConsumerMetadataTopicFW.Builder(); + private final ConsumerTopicPartitionFW.Builder topicPartitionRW = new ConsumerTopicPartitionFW.Builder(); + private final ConsumerPartitionFW.Builder partitionRW = new ConsumerPartitionFW.Builder(); + private final ConsumerSubscriptionUserdataFW.Builder subscriptionUserdataRW = + new ConsumerSubscriptionUserdataFW.Builder(); + private final ConsumerAssignmentUserdataFW.Builder assignmentUserdataRW = + new ConsumerAssignmentUserdataFW.Builder(); + private final Array32FW memberAssignmentRO = + new Array32FW<>(new MemberAssignmentFW()); private final ResourceResponseFW resourceResponseRO = new ResourceResponseFW(); private final ConfigResponseFW configResponseRO = new ConfigResponseFW(); @@ -186,11 +201,16 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final HeartbeatResponseFW heartbeatResponseRO = new HeartbeatResponseFW(); private final LeaveGroupResponseFW leaveGroupResponseRO = new LeaveGroupResponseFW(); private final LeaveMemberFW leaveMemberRO = new LeaveMemberFW(); - private final Array32FW memberAssignmentRO = - new Array32FW<>(new MemberAssignmentFW()); - private final ConsumerMetadataFW groupMemberMetadataRO = new ConsumerMetadataFW(); - private 
final ConsumerMetadataTopicFW groupMetadataTopicRO = new ConsumerMetadataTopicFW(); - private final ConsumerUserdataFW groupUserdataRO = new ConsumerUserdataFW(); + private final Array32FW.Builder topicPartitionsRW = + new Array32FW.Builder<>(new TopicAssignmentFW.Builder(), new TopicAssignmentFW()); + private final ConsumerSubscriptionMetadataFW subscriptionMetadataRO = new ConsumerSubscriptionMetadataFW(); + private final ConsumerAssignmentMetadataFW assignmentMetadataRO = new ConsumerAssignmentMetadataFW(); + private final ConsumerMetadataTopicFW metadataTopicRO = new ConsumerMetadataTopicFW(); + private final ConsumerSubscriptionUserdataFW subscriptionUserdataRO = new ConsumerSubscriptionUserdataFW(); + private final ConsumerAssignmentUserdataFW assignmentUserdataRO = new ConsumerAssignmentUserdataFW(); + private final ConsumerTopicPartitionFW topicPartitionRO = new ConsumerTopicPartitionFW(); + private final ConsumerPartitionFW partitionRO = new ConsumerPartitionFW(); + private final Array32FW assignmentConsumersRO = new Array32FW<>(new ConsumerAssignmentFW()); private final KafkaGroupMemberMetadataFW kafkaMemberMetadataRO = new KafkaGroupMemberMetadataFW(); @@ -1509,15 +1529,16 @@ private void doApplicationBegin( private void doApplicationData( long traceId, long authorization, - OctetsFW payload) + DirectBuffer buffer, + int offset, + int length) { final int reserved = replyPad; - if (payload.sizeof() > 0) + if (length > 0) { doData(application, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, replyBudgetId, reserved, - payload.value(), 0, payload.sizeof(), EMPTY_EXTENSION); + traceId, authorization, replyBudgetId, reserved, buffer, offset, length, EMPTY_EXTENSION); } else { @@ -3437,8 +3458,8 @@ private void doEncodeJoinGroupRequest( encodeProgress = joinGroupRequest.limit(); - final int metadataLimit = delegate.topicMetadataLimit > 0 ? 
doGenerateMembersMetadata() : - doGenerateEmptyMetadata(); + final int metadataLimit = delegate.topicMetadataLimit > 0 ? doGenerateSubscriptionMetadata() : + doGenerateEmptySubscriptionMetadata(); final ProtocolMetadataFW protocolMetadata = protocolMetadataRW.wrap(encodeBuffer, encodeProgress, encodeLimit) @@ -3466,7 +3487,7 @@ private void doEncodeJoinGroupRequest( delegate.doApplicationBeginIfNecessary(traceId, authorization); } - private int doGenerateMembersMetadata() + private int doGenerateSubscriptionMetadata() { final MutableDirectBuffer encodeBuffer = extBuffer; final int encodeOffset = 0; @@ -3477,7 +3498,7 @@ private int doGenerateMembersMetadata() KafkaGroupMemberMetadataFW memberMetadata = kafkaMemberMetadataRO .wrap(delegate.metadataBuffer, 0, delegate.topicMetadataLimit); - ConsumerMetadataFW metadata = groupMemberMetadataRW + ConsumerSubscriptionMetadataFW metadata = groupSubscriptionMetadataRW .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) .version(METADATA_LOWEST_VERSION) .metadataTopicCount(memberMetadata.topics().fieldCount()) @@ -3487,7 +3508,7 @@ private int doGenerateMembersMetadata() memberMetadata.topics().forEach(t -> { - ConsumerMetadataTopicFW metadataTopic = groupMetadataTopicRW + ConsumerMetadataTopicFW metadataTopic = metadataTopicRW .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) .name(t.topic()) .build(); @@ -3496,7 +3517,7 @@ private int doGenerateMembersMetadata() memberMetadata.topics().forEach(t -> { - final ConsumerUserdataFW userdata = groupUserdataRW + final ConsumerSubscriptionUserdataFW userdata = subscriptionUserdataRW .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) .userdata(delegate.metadataBuffer, 0, delegate.topicMetadataLimit) .ownedPartitions(0) @@ -3508,7 +3529,7 @@ private int doGenerateMembersMetadata() return encodeProgress.get(); } - private int doGenerateEmptyMetadata() + private int doGenerateEmptySubscriptionMetadata() { final MutableDirectBuffer encodeBuffer = extBuffer; final int 
encodeOffset = 0; @@ -3516,7 +3537,7 @@ private int doGenerateEmptyMetadata() final MutableInteger encodeProgress = new MutableInteger(encodeOffset); - ConsumerMetadataFW metadata = groupMemberMetadataRW + ConsumerSubscriptionMetadataFW metadata = groupSubscriptionMetadataRW .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) .version(METADATA_LOWEST_VERSION) .metadataTopicCount(0) @@ -3524,7 +3545,7 @@ private int doGenerateEmptyMetadata() encodeProgress.set(metadata.limit()); - final ConsumerUserdataFW userdata = groupUserdataRW + final ConsumerSubscriptionUserdataFW userdata = subscriptionUserdataRW .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) .userdata(delegate.metadataBuffer, 0, delegate.topicMetadataLimit) .ownedPartitions(0) @@ -3535,6 +3556,56 @@ private int doGenerateEmptyMetadata() return encodeProgress.get(); } + private int doGenerateAssignmentMetadata( + Array32FW topicPartitions, + int progressOffset) + { + final MutableDirectBuffer encodeBuffer = extBuffer; + final int encodeOffset = progressOffset; + final int encodeLimit = encodeBuffer.capacity(); + + final MutableInteger encodeProgress = new MutableInteger(encodeOffset); + + ConsumerAssignmentMetadataFW metadata = assignmentMetadataRW + .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .version(METADATA_LOWEST_VERSION) + .metadataTopicCount(topicPartitions.fieldCount()) + .build(); + + encodeProgress.set(metadata.limit()); + + topicPartitions.forEach(t -> + { + final Array32FW partitions = t.partitions(); + + ConsumerTopicPartitionFW topicPartition = topicPartitionRW + .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .topic(t.topic()) + .partitionCount(partitions.fieldCount()) + .build(); + encodeProgress.set(topicPartition.limit()); + + partitions.forEach(p -> + { + ConsumerPartitionFW partition = partitionRW.wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .partitionId(p.partitionId()) + .build(); + encodeProgress.set(partition.limit()); + }); + + Array32FW 
assignmentUserdata = t.userdata(); + final ConsumerAssignmentUserdataFW userdata = assignmentUserdataRW + .wrap(encodeBuffer, encodeProgress.get(), encodeLimit) + .userdata(assignmentUserdata.buffer(), assignmentUserdata.offset(), assignmentUserdata.sizeof()) + .build(); + + encodeProgress.set(userdata.limit()); + + }); + + return encodeProgress.get(); + } + private void doEncodeSyncGroupRequest( long traceId, long budgetId) @@ -3573,16 +3644,20 @@ private void doEncodeSyncGroupRequest( Array32FW assignments = memberAssignmentRO .wrap(assignment.buffer(), assignment.offset(), assignment.limit()); + MutableInteger progressOffset = new MutableInteger(); assignments.forEach(a -> { Array32FW topicPartitions = a.assignments(); - final AssignmentFW groupAssignment = + + int newProgressOffset = doGenerateAssignmentMetadata(topicPartitions, progressOffset.get()); + final AssignmentFW memberAssignment = assignmentRW.wrap(encodeBuffer, encodeProgress.get(), encodeLimit) .memberId(a.memberId()) - .value(topicPartitions.buffer(), topicPartitions.offset(), topicPartitions.sizeof()) + .value(extBuffer, progressOffset.get(), newProgressOffset) .build(); - encodeProgress.set(groupAssignment.limit()); + encodeProgress.set(memberAssignment.limit()); + progressOffset.set(newProgressOffset); }); } else @@ -4000,17 +4075,17 @@ private void onJoinGroupResponse( int progress = 0; - ConsumerMetadataFW newGroupMetadata = groupMemberMetadataRO + ConsumerSubscriptionMetadataFW newGroupMetadata = subscriptionMetadataRO .wrap(buffer, 0, metadata.sizeof()); progress = newGroupMetadata.limit(); for (int i = 0; i < newGroupMetadata.metadataTopicCount(); i++) { - ConsumerMetadataTopicFW topic = groupMetadataTopicRO.wrap(buffer, progress, limit); + ConsumerMetadataTopicFW topic = metadataTopicRO.wrap(buffer, progress, limit); progress = topic.limit(); } - ConsumerUserdataFW userdata = groupUserdataRO.wrap(buffer, progress, limit); + ConsumerSubscriptionUserdataFW userdata = 
subscriptionUserdataRO.wrap(buffer, progress, limit); gm.item(i -> { @@ -4041,19 +4116,75 @@ private void onSynGroupRebalance( private void onSyncGroupResponse( long traceId, long authorization, - OctetsFW assignment) + OctetsFW newAssignment) { nextResponseId++; - delegate.doApplicationData(traceId, authorization, assignment); + if (newAssignment.sizeof() > 0) + { + Array32FW.Builder topicAssignmentBuilder = + topicPartitionsRW.wrap(extBuffer, 0, extBuffer.capacity()); + + final DirectBuffer buffer = newAssignment.value(); + final int limit = newAssignment.sizeof(); + + MutableInteger progress = new MutableInteger(); + + final ConsumerAssignmentMetadataFW assignment = assignmentMetadataRO.wrap(buffer, progress.get(), limit); + progress.set(assignment.limit()); + + for (int i = 0; i < assignment.metadataTopicCount(); i++) + { + ConsumerTopicPartitionFW topicPartition = topicPartitionRO + .wrap(buffer, progress.get(), limit); + + progress.set(topicPartition.limit()); + + topicAssignmentBuilder.item(ta -> + { + ta.topic(topicPartition.topic()); + int partitionCount = topicPartition.partitionCount(); + for (int t = 0; t < partitionCount; t++) + { + ConsumerPartitionFW partition = partitionRO.wrap(buffer, progress.get(), limit); + progress.set(partition.limit()); + + ta.partitionsItem(p -> p.partitionId(partition.partitionId())); + } + + ConsumerAssignmentUserdataFW assignmentUserdata = + assignmentUserdataRO.wrap(buffer, progress.get(), limit); + OctetsFW userdata = assignmentUserdata.userdata(); - if (heartbeatRequestId == NO_CANCEL_ID) + progress.set(assignmentUserdata.limit()); + + assignmentConsumersRO.wrap(userdata.value(), 0, userdata.sizeof()); + ta.userdata(assignmentConsumersRO); + }); + + } + + Array32FW topicAssignment = topicAssignmentBuilder.build(); + + delegate.doApplicationData(traceId, authorization, topicAssignment.buffer(), topicAssignment.offset(), + topicAssignment.sizeof()); + } + else { - encoder = encodeHeartbeatRequest; + 
delegate.doApplicationData(traceId, authorization, EMPTY_OCTETS.buffer(), EMPTY_OCTETS.offset(), + EMPTY_OCTETS.sizeof()); + } - heartbeatRequestId = signaler.signalAt(currentTimeMillis() + delegate.timeout / 2, - originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + if (heartbeatRequestId != NO_CANCEL_ID) + { + signaler.cancel(heartbeatRequestId); + heartbeatRequestId = NO_CANCEL_ID; } + + encoder = encodeHeartbeatRequest; + + heartbeatRequestId = signaler.signalAt(currentTimeMillis() + delegate.timeout / 2, + originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); } private void onHeartbeatResponse( diff --git a/runtime/binding-kafka/src/main/zilla/protocol.idl b/runtime/binding-kafka/src/main/zilla/protocol.idl index a7e56952be..66afcfdbf8 100644 --- a/runtime/binding-kafka/src/main/zilla/protocol.idl +++ b/runtime/binding-kafka/src/main/zilla/protocol.idl @@ -540,30 +540,42 @@ scope protocol scope consumer { - struct ConsumerTopicPartition + struct ConsumerPartition { int32 partitionId; } - struct ConsumerTopic + struct ConsumerTopicPartition { string16 topic; int32 partitionCount; } - struct ConsumerUserdata + struct ConsumerSubscriptionUserdata { uint32 userdataLength; octets[userdataLength] userdata; int32 ownedPartitions; } + struct ConsumerAssignmentUserdata + { + uint32 userdataLength; + octets[userdataLength] userdata; + } + struct ConsumerMetadataTopic { string16 name; } - struct ConsumerMetadata + struct ConsumerSubscriptionMetadata + { + int16 version; + int32 metadataTopicCount; + } + + struct ConsumerAssignmentMetadata { int16 version; int32 metadataTopicCount; diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt index fb08daee14..bc4c0de748 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/client.rpt @@ -26,6 +26,7 @@ write zilla:begin.ext ${kafka:beginEx() .timeout(45000) .topic("test") .partition(0) + .partition(1) .build() .build()} @@ -38,6 +39,10 @@ read zilla:data.ext ${kafka:dataEx() .consumer() .id("consumer-1") .partition(0) - .build() .build() + .consumer() + .id("consumer-2") + .partition(1) + .build() + .build() .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt index 884f25b101..74d720516f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/partition.assignment/server.rpt @@ -30,6 +30,7 @@ read zilla:begin.ext ${kafka:beginEx() .timeout(45000) .topic("test") .partition(0) + .partition(1) .build() .build()} @@ -43,8 +44,12 @@ write zilla:data.ext ${kafka:dataEx() .consumer() .id("consumer-1") .partition(0) - .build() .build() + .consumer() + .id("consumer-2") + .partition(1) + .build() + .build() .build()} write zilla:data.empty diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt index a97e828e3b..febbfff1a2 100644 --- 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/client.rpt @@ -28,6 +28,7 @@ write zilla:begin.ext ${kafka:beginEx() .consumerId("consumer-1") .topic("test") .partitionId(0) + .partitionId(1) .build() .build()) .build() @@ -53,32 +54,62 @@ read advised zilla:flush ${kafka:flushEx() .consumerId("consumer-1") .topic("test") .partitionId(0) + .partitionId(1) .build() - .build()) - .build() + .build()) + .members("memberId-2", kafka:memberMetadata() + .consumerId("consumer-2") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() .build()} write ${kafka:memberAssignment() .member("memberId-1") .assignment() - .topic("test") - .partitionId(0) - .consumer() - .id("consumer-1") - .partitionId(0) - .build() - .build() - .build() - .build()} + .topic("test") + .partitionId(1) + .consumer() + .id("consumer-1") + .partitionId(1) + .build() + .consumer() + .id("consumer-2") + .partitionId(0) + .build() + .build() + .build() + .member("memberId-2") + .assignment() + .topic("test") + .partitionId(0) + .consumer() + .id("consumer-1") + .partitionId(1) + .build() + .consumer() + .id("consumer-2") + .partitionId(0) + .build() + .build() + .build() + .build()} write flush read ${kafka:topicAssignment() - .topic() - .id("test") - .partitionId(0) - .consumer() - .id("consumer-1") + .topic() + .id("test") .partitionId(0) - .build() - .build() - .build()} + .consumer() + .id("consumer-1") + .partitionId(0) + .build() + .consumer() + .id("consumer-2") + .partitionId(1) + .build() + .build() + .build()} diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt index 2501063a00..c895eeeeaa 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/partition.assignment/server.rpt @@ -32,6 +32,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .consumerId("consumer-1") .topic("test") .partitionId(0) + .partitionId(1) .build() .build()) .build() @@ -58,22 +59,48 @@ write advise zilla:flush ${kafka:flushEx() .consumerId("consumer-1") .topic("test") .partitionId(0) + .partitionId(1) .build() - .build()) - .build() + .build()) + .members("memberId-2", kafka:memberMetadata() + .consumerId("consumer-2") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() .build()} read ${kafka:memberAssignment() .member("memberId-1") .assignment() .topic("test") - .partitionId(0) + .partitionId(1) .consumer() .id("consumer-1") - .partitionId(0) - .build() + .partitionId(1) + .build() + .consumer() + .id("consumer-2") + .partitionId(0) + .build() .build() - .build() + .build() + .member("memberId-2") + .assignment() + .topic("test") + .partitionId(0) + .consumer() + .id("consumer-1") + .partitionId(1) + .build() + .consumer() + .id("consumer-2") + .partitionId(0) + .build() + .build() + .build() .build()} write ${kafka:topicAssignment() @@ -84,6 +111,10 @@ write ${kafka:topicAssignment() .id("consumer-1") .partitionId(0) .build() - .build() + .consumer() + .id("consumer-2") + .partitionId(1) + .build() + .build() .build()} write flush From c5185486d399d15823ba4d72a09a3b0aa72eb825 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Fri, 15 Sep 2023 20:45:23 +0200 Subject: [PATCH 085/115] Don't end subscribe stream when unsubscribe, no subscription (#418) --- .../client.rpt | 7 +++++-- 
.../server.rpt | 7 +++++-- .../session.unsubscribe.after.subscribe/client.rpt | 7 +++++-- .../session.unsubscribe.after.subscribe/server.rpt | 7 +++++-- .../session.unsubscribe.via.session.state/client.rpt | 7 +++++-- .../session.unsubscribe.via.session.state/server.rpt | 7 +++++-- .../application/unsubscribe.after.subscribe/client.rpt | 7 +++++-- .../application/unsubscribe.after.subscribe/server.rpt | 7 +++++-- .../client.rpt | 7 +++++-- .../server.rpt | 7 +++++-- .../binding/mqtt/internal/stream/MqttServerFactory.java | 9 +-------- 11 files changed, 51 insertions(+), 28 deletions(-) diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt index 16cfaa112a..310b2003b6 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt @@ -84,5 +84,8 @@ write advise zilla:flush ${mqtt:flushEx() .build() .build()} -write close -read closed +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .build() + .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt index 95ffcd3aaa..28543eda72 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt @@ -85,5 +85,8 @@ read advised zilla:flush ${mqtt:flushEx() .build() .build()} -read closed -write close +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .build() + .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt index 8e7264694f..32f715dbcf 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt @@ -75,5 +75,8 @@ write zilla:begin.ext ${mqtt:beginEx() connected -write close -read closed +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .build() + .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt index f4a805c480..d85d49fe20 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt @@ -75,5 +75,8 @@ read zilla:begin.ext ${mqtt:matchBeginEx() connected -read closed -write close +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .build() + .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt index e49c39952f..901faadc4f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt @@ -68,5 +68,8 @@ write zilla:begin.ext ${mqtt:beginEx() connected -write close -read closed +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .build() + .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt index 1ac1e8f037..9c281984f5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt @@ -66,5 +66,8 @@ read zilla:begin.ext ${mqtt:matchBeginEx() connected -read closed -write close +read 
advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .build() + .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt index 2863642652..e434d60926 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt @@ -28,5 +28,8 @@ write zilla:begin.ext ${mqtt:beginEx() connected -write close -read closed +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .build() + .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt index 06ce00f3e1..c55c1fe474 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt @@ -30,5 +30,8 @@ read zilla:begin.ext ${mqtt:matchBeginEx() connected -read closed -write close +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .build() + .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt index 3dc75793ec..a261a23c1f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt @@ -29,5 +29,8 @@ write zilla:begin.ext ${mqtt:beginEx() connected -write close -read closed +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .build() + .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt index 5c00c727b7..5e29cdd216 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt @@ -31,5 +31,8 @@ read zilla:begin.ext ${mqtt:matchBeginEx() connected -read closed -write close +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .build() + .build()} diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 1cc23a6c69..1c8a2969b1 100644 --- 
a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -4106,14 +4106,7 @@ private void doSubscribeFlushOrEnd( } else { - if (subscriptions.isEmpty()) - { - doSubscribeAppEnd(traceId); - } - else - { - doSubscribeFlush(traceId, 0, null); - } + doSubscribeFlush(traceId, 0, null); } } From 015d2efaa89c395eb6863e93d58e6f9e61174f0e Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Fri, 15 Sep 2023 12:22:57 -0700 Subject: [PATCH 086/115] Fix finding next partition id (#419) --- .../internal/stream/KafkaMergedFactory.java | 4 +- .../kafka/internal/stream/CacheMergedIT.java | 10 + .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 43 ++++ .../server.rpt | 47 ++++ .../client.rpt | 221 ++++++++++++++++++ .../server.rpt | 218 +++++++++++++++++ .../kafka/streams/application/MergedIT.java | 18 ++ 9 files changed, 561 insertions(+), 4 deletions(-) create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/server.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.message.value/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.message.value/server.rpt diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java index 
c9383a6581..5a0a38f09c 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java @@ -1263,7 +1263,7 @@ private int nextPartitionData( KafkaKeyFW hashKey, KafkaKeyFW key) { - final int partitionCount = leadersByAssignedId.size(); + final int partitionCount = leadersByPartitionId.size(); final int keyHash = hashKey.length() != -1 ? defaultKeyHash(hashKey) : key.length() != -1 ? defaultKeyHash(key) : nextNullKeyHashData++; @@ -1275,7 +1275,7 @@ private int nextPartitionData( private int nextPartitionFlush( KafkaKeyFW key) { - final int partitionCount = leadersByAssignedId.size(); + final int partitionCount = leadersByPartitionId.size(); final int keyHash = key.length() != -1 ? defaultKeyHash(key) : nextNullKeyHashFlush++; final int partitionId = partitionCount > 0 ? (0x7fff_ffff & keyHash) % partitionCount : 0; diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java index 7258a7122e..0b6109112f 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java @@ -605,4 +605,14 @@ public void shouldRejectMessageForInvalidPartition() throws Exception { k3po.finish(); } + + @Test + @Configuration("cache.options.merged.yaml") + @Specification({ + "${app}/merged.group.produce.message.value/client", + "${app}/unmerged.group.produce.message.value/server"}) + public void shouldProduceMergedMergedMessageValue() throws Exception + { + k3po.finish(); + } } diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/client.rpt index a37665b86d..4cfca30fad 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/client.rpt @@ -35,7 +35,7 @@ write zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() .timestamp(newTimestamp) - .partition(1, 1) + .key("message-key!") .build() .build()} write "Hello, world #A1" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/server.rpt index bc7bde33c7..7410a8a73c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.invalid.partition/server.rpt @@ -40,7 +40,7 @@ read zilla:data.ext ${kafka:dataEx() .typeId(zilla:id("kafka")) .merged() .timestamp(newTimestamp) - .partition(1, 1) + .key("message-key!") .build() .build()} read "Hello, world #A1" diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/client.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/client.rpt new file mode 100644 index 0000000000..0903d5fcf0 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/client.rpt @@ -0,0 +1,43 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .groupId("client-1") + .consumerId("consumer-1") + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(newTimestamp) + .key("message-key") + .build() + .build()} +write "Hello, world #A1" +write flush + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/server.rpt new file mode 100644 index 0000000000..915253a018 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/merged.group.produce.message.value/server.rpt @@ -0,0 +1,47 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_ONLY") + .topic("test") + .groupId("client-1") + .consumerId("consumer-1") + .ackMode("LEADER_ONLY") + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .timestamp(newTimestamp) + .key("message-key") + .build() + .build()} +read "Hello, world #A1" + diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.message.value/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.message.value/client.rpt new file mode 100644 index 0000000000..9a9aec1d74 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.message.value/client.rpt @@ -0,0 +1,221 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} + +read notify RECEIVED_CONFIG + +connect await RECEIVED_CONFIG + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + 
.build() + .build()} + +read zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .partition(1, 2) + .build() + .build()} +read notify PARTITION_COUNT_2 + +connect await PARTITION_COUNT_2 + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(45000) + .metadata(kafka:memberMetadata() + .consumerId("consumer-1") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(30000) + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("consumer-1") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +write ${kafka:memberAssignment() + .member("memberId-1") + .assignment() + .topic("test") + .partitionId(0) + .partitionId(1) + .consumer() + .id("consumer-1") + .partitionId(0) + .partitionId(1) + .build() + .build() + .build() + .build()} +write flush + +read ${kafka:topicAssignment() + .topic() + .id("test") + .partitionId(0) + .consumer() + .id("consumer-1") + .partitionId(0) + .build() + .consumer() + .id("consumer-2") + .partitionId(1) + .build() + .build() + .build()} + +read notify RECEIVED_CONSUMER + +connect await RECEIVED_CONSUMER + "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + option zilla:affinity 1 + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +read zilla:begin.ext 
${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("message-key") + .build() + .build()} +write "Hello, world #A1" +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.message.value/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.message.value/server.rpt new file mode 100644 index 0000000000..d5ba82e974 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.produce.message.value/server.rpt @@ -0,0 +1,218 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +property deltaMillis 0L +property newTimestamp ${kafka:timestamp() + deltaMillis} +property padding 0 + +accept "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .describe() + .topic("test") + .config("cleanup.policy") + .config("max.message.bytes") + .config("segment.bytes") + .config("segment.index.bytes") + .config("segment.ms") + .config("retention.bytes") + .config("retention.ms") + .config("delete.retention.ms") + .config("min.compaction.lag.ms") + .config("max.compaction.lag.ms") + .config("min.cleanable.dirty.ratio") + .build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .describe() + .config("cleanup.policy", "delete") + .config("max.message.bytes", 1000012) + .config("segment.bytes", 1073741824) + .config("segment.index.bytes", 10485760) + .config("segment.ms", 604800000) + .config("retention.bytes", -1) + .config("retention.ms", 604800000) + .config("delete.retention.ms", 86400000) + .config("min.compaction.lag.ms", 0) + .config("max.compaction.lag.ms", 9223372036854775807) + .config("min.cleanable.dirty.ratio", 0.5) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .meta() + .topic("test") + 
.build() + .build()} +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .meta() + .partition(0, 1) + .partition(1, 2) + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(0) + .metadata(kafka:memberMetadata() + .consumerId("consumer-1") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("client-1") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1", kafka:memberMetadata() + .consumerId("consumer-1") + .topic("test") + .partitionId(0) + .partitionId(1) + .build() + .build()) + .build() + .build()} + +read ${kafka:memberAssignment() + .member("memberId-1") + .assignment() + .topic("test") + .partitionId(0) + .partitionId(1) + .consumer() + .id("consumer-1") + .partitionId(0) + .partitionId(1) + .build() + .build() + .build() + .build()} + +write ${kafka:topicAssignment() + .topic() + .id("test") + .partitionId(0) + .consumer() + .id("consumer-1") + .partitionId(0) + .build() + .consumer() + .id("consumer-2") + .partitionId(1) + .build() + .build() + .build()} +write flush + +accepted + +read zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .produce() + .topic("test") + .partition(0) + .build() + .build()} +write flush + + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .produce() + .timestamp(newTimestamp) + .ackMode("LEADER_ONLY") + .key("message-key") + 
.build() + .build()} +read "Hello, world #A1" diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java index fbc593ab21..ad819c9df3 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/MergedIT.java @@ -683,4 +683,22 @@ public void shouldRejectMergedMessageForInvalidPartition() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${app}/merged.group.produce.message.value/client", + "${app}/merged.group.produce.message.value/server"}) + public void shouldProduceMergedMergedMessageValue() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/unmerged.group.produce.message.value/client", + "${app}/unmerged.group.produce.message.value/server"}) + public void shouldProduceUnmergedMergedMessageValue() throws Exception + { + k3po.finish(); + } } From 580e8261468e9fef58821a4cc4aa3a3aed1639b9 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Sat, 16 Sep 2023 15:57:50 +0200 Subject: [PATCH 087/115] Remove default kafka topic names (#416) --- .../binding/mqtt/kafka/config/proxy.yaml | 5 ++ .../kafka/schema/mqtt.kafka.schema.patch.json | 25 ++++++--- .../publish.client.sent.abort/client.rpt | 2 +- .../publish.client.sent.abort/server.rpt | 2 +- .../publish.client.sent.reset/client.rpt | 2 +- .../publish.client.sent.reset/server.rpt | 2 +- .../kafka/publish.empty.message/client.rpt | 2 +- .../kafka/publish.empty.message/server.rpt | 2 +- .../kafka/publish.multiple.clients/client.rpt | 4 +- .../kafka/publish.multiple.clients/server.rpt | 4 +- .../publish.multiple.messages/client.rpt | 2 +- .../publish.multiple.messages/server.rpt | 2 +- .../kafka/publish.one.message/client.rpt | 4 +- 
.../kafka/publish.one.message/server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../streams/kafka/publish.retained/client.rpt | 4 +- .../streams/kafka/publish.retained/server.rpt | 4 +- .../publish.server.sent.abort/client.rpt | 2 +- .../publish.server.sent.abort/server.rpt | 2 +- .../kafka/publish.server.sent.data/client.rpt | 2 +- .../kafka/publish.server.sent.data/server.rpt | 2 +- .../publish.server.sent.flush/client.rpt | 2 +- .../publish.server.sent.flush/server.rpt | 2 +- .../publish.server.sent.reset/client.rpt | 2 +- .../publish.server.sent.reset/server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../publish.with.user.property/client.rpt | 2 +- .../publish.with.user.property/server.rpt | 2 +- .../client.rpt | 6 +-- .../server.rpt | 6 +-- .../client.rpt | 12 ++--- .../server.rpt | 12 ++--- .../session.cancel.session.expiry/client.rpt | 2 +- .../session.cancel.session.expiry/server.rpt | 2 +- .../session.client.sent.reset/client.rpt | 4 +- .../session.client.sent.reset/server.rpt | 4 +- .../kafka/session.client.takeover/client.rpt | 12 ++--- .../kafka/session.client.takeover/server.rpt | 14 ++--- .../client.rpt | 6 +-- .../server.rpt | 6 +-- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../session.exists.clean.start/client.rpt | 10 ++-- .../session.exists.clean.start/server.rpt | 12 ++--- .../client.rpt | 4 +- .../server.rpt | 4 +- .../streams/kafka/session.redirect/client.rpt | 4 +- .../streams/kafka/session.redirect/server.rpt | 4 +- .../session.server.sent.reset/client.rpt | 4 +- .../session.server.sent.reset/server.rpt | 4 +- .../client.rpt | 6 +-- .../server.rpt | 6 +-- .../kafka/session.subscribe/client.rpt | 6 +-- .../kafka/session.subscribe/server.rpt | 6 +-- .../client.rpt | 6 +-- .../server.rpt | 6 +-- 
.../client.rpt | 6 +-- .../server.rpt | 6 +-- .../client.rpt | 14 ++--- .../server.rpt | 14 ++--- .../client.rpt | 14 ++--- .../server.rpt | 14 ++--- .../client.rpt | 6 +-- .../server.rpt | 6 +-- .../client.rpt | 6 +-- .../server.rpt | 6 +-- .../client.rpt | 8 +-- .../server.rpt | 8 +-- .../client.rpt | 12 ++--- .../server.rpt | 12 ++--- .../client.rpt | 10 ++-- .../server.rpt | 10 ++-- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../subscribe.client.sent.abort/client.rpt | 2 +- .../subscribe.client.sent.abort/server.rpt | 2 +- .../subscribe.client.sent.data/client.rpt | 2 +- .../subscribe.client.sent.data/server.rpt | 2 +- .../subscribe.client.sent.reset/client.rpt | 2 +- .../subscribe.client.sent.reset/server.rpt | 2 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 6 +-- .../server.rpt | 6 +-- .../subscribe.filter.change.retain/client.rpt | 4 +- .../subscribe.filter.change.retain/server.rpt | 4 +- .../subscribe.multiple.message/client.rpt | 2 +- .../subscribe.multiple.message/server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../kafka/subscribe.one.message/client.rpt | 2 +- .../kafka/subscribe.one.message/server.rpt | 2 +- .../subscribe.publish.no.local/client.rpt | 4 +- .../subscribe.publish.no.local/server.rpt | 4 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../streams/kafka/subscribe.retain/client.rpt | 4 +- .../streams/kafka/subscribe.retain/server.rpt | 4 +- .../client.rpt | 4 +- .../server.rpt | 4 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../subscribe.server.sent.abort/client.rpt | 2 +- .../subscribe.server.sent.abort/server.rpt | 2 +- .../subscribe.server.sent.flush/client.rpt | 2 +- .../subscribe.server.sent.flush/server.rpt | 2 +- .../subscribe.server.sent.reset/client.rpt | 2 +- 
.../subscribe.server.sent.reset/server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../unsubscribe.after.subscribe/client.rpt | 2 +- .../unsubscribe.after.subscribe/server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../internal/MqttKafkaConfiguration.java | 8 --- .../config/MqttKafkaBindingConfig.java | 5 +- .../config/MqttKafkaOptionsConfigAdapter.java | 52 ++++--------------- .../internal/MqttKafkaConfigurationTest.java | 6 --- .../MqttKafkaOptionsConfigAdapterTest.java | 14 ----- 157 files changed, 340 insertions(+), 391 deletions(-) diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml index 0bd125e0fb..42fa9338d1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml @@ -19,4 +19,9 @@ bindings: mqtt0: type: mqtt-kafka kind: proxy + options: + topics: + sessions: mqtt-sessions + messages: mqtt-messages + retained: mqtt-retained exit: kafka0 diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json index 9d4fc0549d..11b2a0e3f7 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json @@ -45,25 +45,33 @@ "sessions": { "title": "Kafka Sessions Topic", - "type": "string", - "default": "mqtt_sessions" + "type": "string" }, "messages": { "title": "Kafka Messages Topic", - "type": "string", - "default": "mqtt_messages" + "type": "string" }, "retained": { "title": "Kafka Retained Topic", - "type": "string", - "default": "mqtt_retained" - } + "type": "string" + }, + "additionalProperties": false }, + "required": + [ + "sessions", + "retained", + "messages" + ], "additionalProperties": false } }, + "required": + [ + "topics" + ], "additionalProperties": false }, "routes": false @@ -73,7 +81,8 @@ { "required": [ - "exit" + "exit", + "options" ] } ] diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/client.rpt index 03d42cb8c5..4583aba9af 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/server.rpt index 4e8812f240..0ac8dd20c6 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/client.rpt index dbfe5cc1e1..7e509d96a0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/server.rpt index 43a6bb2fe5..6b7e4b4dd1 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt index 16715f6575..dcfcdff404 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt index 79343cc379..06ae32f42e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt index 544185a2f2..f3fe1188d3 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -89,7 +89,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt index 0ca6e64b9e..31d90ff103 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -83,7 +83,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt index 997314b9e1..75c0a41415 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt index ebd271846d..6219c1242b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt index bdea0ddfcb..e26510e240 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -41,7 +41,7 @@ write zilla:data.ext ${kafka:dataEx() .headerInt("zilla:timeout-ms", 15000) .header("zilla:content-type", "message") .header("zilla:format", "TEXT") - .header("zilla:reply-to", "mqtt_messages") + .header("zilla:reply-to", "mqtt-messages") .header("zilla:reply-key", "sensor/one") .header("zilla:reply-filter", "sensor") .header("zilla:reply-filter", "one") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt index ebbdf81dd1..29c47e3469 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -44,7 +44,7 @@ read zilla:data.ext ${kafka:matchDataEx() .headerInt("zilla:timeout-ms", 15000) .header("zilla:content-type", "message") .header("zilla:format", "TEXT") - .header("zilla:reply-to", "mqtt_messages") + .header("zilla:reply-to", "mqtt-messages") .header("zilla:reply-key", "sensor/one") .header("zilla:reply-filter", "sensor") .header("zilla:reply-filter", "one") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt index df2ebc41b2..e2e758ab9b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -40,7 +40,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .partition(-1, -2) 
.ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt index 4532e49e0f..d36d277fcc 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -39,7 +39,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt index dd926e3179..9db3b8a094 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + 
.topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -41,7 +41,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt index 3fca45a21a..cb7cccd4cc 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -39,7 +39,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt index d4deb19b4f..812b524ed5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -41,7 +41,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt index d9deb564ae..78c5c9d6d4 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -38,7 +38,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt index 6548ccce19..0b15c9f057 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -40,7 +40,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt index 6a696cbb2a..8daa57e977 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -38,7 +38,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() 
.capabilities("PRODUCE_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt index 418d9c94af..c4be97bfc6 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -85,7 +85,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt index da7e2a6718..17da157dac 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, 
-2) .ackMode("LEADER_ONLY") .build() @@ -84,7 +84,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/client.rpt index 85e541a7b4..88622410a9 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/server.rpt index d1655b8bb3..e21a1bdb56 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") 
.build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/client.rpt index 28f6ca1b52..ef71447f20 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/server.rpt index 8ba85b97fb..f2a6da9f44 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/client.rpt index d5561c9f80..ef6ea2280d 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/server.rpt index 6b232483f0..fb4be6ab02 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/client.rpt index d8050b11e2..564216eb9d 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/server.rpt index ed35626006..9fe018d660 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt index be93f2900d..587bea6745 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt index 6044b6f9ed..ee889e8d1b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt index f4b9cb78c3..77f5183002 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt 
@@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt index c126ded380..567ae9bd64 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt index 96824eac7c..e5c66df5de 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) 
.ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt index eb22fd5484..89a6ce7b6b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt index 7df85f13d7..8506d5fce5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -106,7 +106,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + 
.topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -176,7 +176,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt index 074efefd62..ef9663a613 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -111,7 +111,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -174,7 +174,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt index 8256a78bba..627d7be1ee 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -91,7 +91,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -210,7 +210,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -237,7 +237,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -302,7 +302,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -341,7 +341,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt index 9d236ff7ad..0e1de2bd8e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -83,7 +83,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -201,7 +201,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -224,7 +224,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -278,7 +278,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -314,7 +314,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() 
.capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt index 4250a2f898..7280c56980 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt index 7dd3cc0d8a..9f3362b0c9 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt @@ -27,7 +27,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt index a9bb4442fb..782fd686d2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -90,7 +90,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt index b743a3bcfb..7629a6188f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ 
-83,7 +83,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt index 4c588f81a2..e004bc52f0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -103,7 +103,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -248,7 +248,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -275,7 +275,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -370,7 +370,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + 
.topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -454,7 +454,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt index 386a69ea77..0bfb969589 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -76,7 +76,7 @@ read zilla:data.empty read advised zilla:flush -# On the session stream the heartbeat arrives (on the mqtt_sessions merged stream) +# On the session stream the heartbeat arrives (on the mqtt-sessions merged stream) read await HEARTBEAT1_SENT write advise zilla:flush ${kafka:flushEx() @@ -104,7 +104,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -246,7 +246,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -269,7 +269,7 @@ read 
zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -363,7 +363,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -445,7 +445,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt index 7f37709283..4c092b32a4 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -106,7 +106,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -177,7 +177,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() 
.capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt index ccdd5717f4..861a9a642b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -111,7 +111,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -174,7 +174,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt index fba6e61b02..cc2993adc1 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -98,7 +98,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt index ebbfc98724..37d998bce5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -90,7 +90,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() 
.key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt index ea5829e300..f66932d8cd 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -98,7 +98,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt index ca476f7775..627ce38229 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() 
.typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -90,7 +90,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt index 1f4ad590ea..124979c6a5 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -102,7 +102,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -249,7 +249,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -276,7 +276,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() 
.key("client-1#migrate") @@ -366,7 +366,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt index be00f4d0c5..f42dcde687 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -74,7 +74,7 @@ write flush read zilla:data.empty -# On the session stream the heartbeat arrives (on the mqtt_sessions merged stream) +# On the session stream the heartbeat arrives (on the mqtt-sessions merged stream) read advised zilla:flush write advise zilla:flush ${kafka:flushEx() @@ -102,7 +102,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -249,7 +249,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -272,7 +272,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() 
.typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -364,7 +364,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt index c80dd8c3b7..54ed18879f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -89,7 +89,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt index aeba744dc5..baccf1ddd6 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -86,7 +86,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt index babbaf3778..8a99981fdf 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .consumerId("mqtt-1.example.com:1883") .filter() @@ -89,7 +89,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .consumerId("mqtt-1.example.com:1883") .filter() diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt index 424d1853d0..21e5089dea 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .consumerId("mqtt-1.example.com:1883") .filter() @@ -84,7 +84,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .consumerId("mqtt-1.example.com:1883") .filter() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt index 9b47555d45..908ebfbfd8 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() 
.key("client-1#migrate") @@ -87,7 +87,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt index 9ed5aad6ce..7254fa625b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -82,7 +82,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt index 489f71b6e7..18b3359464 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -85,7 +85,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -168,7 +168,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt index 0339f8324e..5bd67d35fd 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -78,7 +78,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") 
.groupId("mqtt-clients") .filter() .key("client-1") @@ -161,7 +161,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt index 45123d301e..3eb35e357b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -88,7 +88,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -184,7 +184,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt index 96edeb8d81..1457648966 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -83,7 +83,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -178,7 +178,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt index bffdd9bd34..f03445143c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -85,7 +85,7 @@ write zilla:begin.ext ${kafka:beginEx() 
.typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -207,7 +207,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt index 88215301ce..a66373b56e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -78,7 +78,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -198,7 +198,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt index 9b891d6105..89ea472605 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -85,7 +85,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -195,7 +195,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt index 289fcda181..3807c05c93 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() 
.typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -78,7 +78,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -186,7 +186,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt index c3072a3fff..b2ba50fd08 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -119,7 +119,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#will-signal") @@ -146,7 +146,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() 
.capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -215,7 +215,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -389,7 +389,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .filter() .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") .build() @@ -432,7 +432,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -467,7 +467,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt index 19479d32b0..5021ae6338 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + 
.topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -129,7 +129,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#will-signal") @@ -152,7 +152,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -215,7 +215,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -392,7 +392,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .filter() .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") .build() @@ -431,7 +431,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -462,7 +462,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt index de56d9bb7a..c174f154f0 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -119,7 +119,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#will-signal") @@ -146,7 +146,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -215,7 +215,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -393,7 +393,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .filter() .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") .build() @@ -438,7 +438,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -455,7 +455,7 @@ write zilla:data.ext ${kafka:dataEx() .header("zilla:filter", "obituaries") .headerInt("zilla:timeout-ms", 15000) .header("zilla:format", "TEXT") - .header("zilla:reply-to", "mqtt_messages") + 
.header("zilla:reply-to", "mqtt-messages") .header("zilla:reply-key", "responses/client1") .header("zilla:reply-filter", "responses") .header("zilla:reply-filter", "client1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt index ac8597bdf6..75656273f8 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -129,7 +129,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#will-signal") @@ -152,7 +152,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -214,7 +214,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -394,7 +394,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - 
.topic("mqtt_sessions") + .topic("mqtt-sessions") .filter() .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") .build() @@ -435,7 +435,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() @@ -454,7 +454,7 @@ read zilla:data.ext ${kafka:matchDataEx() .header("zilla:filter", "obituaries") .headerInt("zilla:timeout-ms", 15000) .header("zilla:format", "TEXT") - .header("zilla:reply-to", "mqtt_messages") + .header("zilla:reply-to", "mqtt-messages") .header("zilla:reply-key", "responses/client1") .header("zilla:reply-filter", "responses") .header("zilla:reply-filter", "client1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt index df38527087..b1e3d9e768 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -78,7 +78,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .filter() .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") .build() @@ -118,7 +118,7 @@ write zilla:begin.ext ${kafka:beginEx() 
.typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt index 6771dc374a..16e530e395 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt @@ -27,7 +27,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -80,7 +80,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .filter() .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") .build() @@ -117,7 +117,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt index 654ca5d7a0..9e79635250 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -64,7 +64,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -133,7 +133,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt index 703704c5d6..0e20b9315f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -68,7 +68,7 @@ read zilla:begin.ext 
${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -129,7 +129,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt index a8480b4e22..7d5f199a10 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -86,7 +86,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#will-signal") @@ -133,7 +133,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -202,7 +202,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - 
.topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt index f0c59441de..7e0fb62e2a 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -92,7 +92,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#will-signal") @@ -136,7 +136,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -199,7 +199,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt index 3f197ab1f2..70ec7a00d8 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -119,7 +119,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#will-signal") @@ -146,7 +146,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -224,7 +224,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -388,7 +388,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .filter() .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") .build() @@ -430,7 +430,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt index c34daded3c..4ffd495ed4 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -129,7 +129,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#will-signal") @@ -152,7 +152,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -228,7 +228,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -396,7 +396,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .filter() .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") .build() @@ -434,7 +434,7 @@ read zilla:begin.ext ${kafka:beginEx() 
.typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt index ca62e7f49c..4cea7bf2ac 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -96,7 +96,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#will-signal") @@ -123,7 +123,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -192,7 +192,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -367,7 +367,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() 
.capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .filter() .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt index 53548bf530..99dbf9f33f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt @@ -24,7 +24,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -104,7 +104,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#will-signal") @@ -127,7 +127,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1#migrate") @@ -188,7 +188,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .key("client-1") @@ -367,7 +367,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() 
.capabilities("FETCH_ONLY") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .filter() .key("client-1#will-1e6a1eb5-810a-459d-a12c-a6fa08f228d1") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt index 682eb828e6..10c9e5ef9b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -47,7 +47,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt index e7fe1ca308..3fc0dc810e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -47,7 +47,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt index 37157d06e7..d650829708 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -47,7 +47,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt index 144a12764f..dc540e79ad 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -47,7 +47,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt index fd91c8e9e1..75d681e200 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -46,7 +46,7 @@ write zilla:begin.ext 
${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt index eb15a3f2e1..44c7564630 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt @@ -25,7 +25,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") @@ -46,7 +46,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_AND_FETCH") - .topic("mqtt_sessions") + .topic("mqtt-sessions") .groupId("mqtt-clients") .filter() .header("type", "will-signal") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt index 07f86fd157..42febc297e 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt index a9cdce7773..249f000fd6 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt index c7f6fb0831..74cd335878 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) 
.merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt index 3a4a22dca7..08266fd63d 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt index 0fedcb6313..cf90ccb5ae 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt index 57555a69f5..33a4cd48c0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt index 4b6b546d33..b5e058ea9d 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -146,7 +146,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() 
.headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt index dbd72d3264..4f5542f2ec 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -148,7 +148,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt index 68c830f04f..0e51052e64 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() 
.capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -154,7 +154,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt index 7a8de77b33..a58a77e6a4 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -160,7 +160,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt index 0cc0083143..8b015c3f7f 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") @@ -67,7 +67,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -185,7 +185,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt index b121a88e65..388f259a09 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") @@ -66,7 +66,7 @@ read 
zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -185,7 +185,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt index 4ff82b94df..7a2fbc461f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -152,7 +152,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt index 77a953a81c..3eaf1b11dd 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -159,7 +159,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt index a9be513247..0dc97a4e64 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt index 7ec7c0a1e1..9c3a5f0ae7 100644 
--- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt index a57afd171b..76fd2f4b7c 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt index 2ab07d5427..db41db77df 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt index 2eae72f9d6..45b065c2b8 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt index 6cfe4c4e2e..77147647be 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt index a9be513247..0dc97a4e64 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt index d309133aad..d40f6234ba 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt index 22bcf36bbb..ad316bf519 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -62,7 +62,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt index 6d725ff94c..f9735c5ce2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") @@ -62,7 +62,7 @@ read zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("PRODUCE_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .partition(-1, -2) .ackMode("LEADER_ONLY") .build() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt index 299696c150..10b353f6de 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt index 8ad99f9fc5..3c399fdc23 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt index fda180c3ea..d21c2fd61a 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt index ccd7462d68..e3ecdaf7ab 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt index dcd25c5048..10e110e5a1 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") @@ -68,7 +68,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt index e2b4ff4d5d..ab47fb0e89 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") @@ -66,7 +66,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt index cb9063ac25..015cad105a 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") @@ -47,7 +47,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt index fa1dba8698..0e010982af 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") @@ -44,7 +44,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt index dba69761a5..f39375e961 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + 
.topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt index 14b873eb07..e87b038279 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_retained") + .topic("mqtt-retained") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt index 4449ffa1e5..b0ed384d92 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt index dcf3c7e1b8..3c84065f22 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt index 80622344b5..06e86035df 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt index ea8220fb1d..fa59abf8ee 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt index 0a278837fe..1072259543 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt index b7fae7c2b1..85495f1536 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt index 64893d8fa8..41dd5cfac0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt index e99cd37353..d8072a6049 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt index c7839119ce..022a6f0e82 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt index d6fc5bac27..431e58d345 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt index d1e5384a94..bdff98e118 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt index b196c48821..5bc23a0493 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt index 2a8e12eeb4..524ef4f2b2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt index d79e051bb1..4ea64808b5 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt index b388d28b7e..9df346ca49 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt index 62719bea04..eca0521ab6 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt index 72d62a020f..fdb64b861f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt index bc2bf474bf..ea4e40f667 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt index 356c44a7b0..11fbfac661 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt index 55647a946e..c3982cc85b 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt index 750d5e5eee..926bc2d4e2 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt index bcb29c8bcb..48f2130f6b 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt index 9b0fbef39a..854fbef248 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt index e2358e7b12..0b6321c810 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt index 413d0be841..44cc63a326 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt index 3ca0a0c806..bc72156423 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext ${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt index c79b4bde45..a52610fe01 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt @@ -21,7 +21,7 @@ write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt index a5e90e95ea..01b777ce0f 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt @@ -23,7 +23,7 @@ read zilla:begin.ext 
${kafka:matchBeginEx() .typeId(zilla:id("kafka")) .merged() .capabilities("FETCH_ONLY") - .topic("mqtt_messages") + .topic("mqtt-messages") .filter() .headers("zilla:filter") .sequence("sensor") diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java index f04b2a3b27..2dc397183d 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java @@ -29,12 +29,7 @@ public class MqttKafkaConfiguration extends Configuration { - public static final String MQTT_CLIENTS_GROUP_ID = "mqtt-clients"; private static final ConfigurationDef MQTT_KAFKA_CONFIG; - - public static final PropertyDef MESSAGES_TOPIC; - public static final PropertyDef RETAINED_MESSAGES_TOPIC; - public static final PropertyDef SESSIONS_TOPIC; public static final PropertyDef SESSION_ID; public static final PropertyDef WILL_ID; public static final PropertyDef LIFETIME_ID; @@ -46,9 +41,6 @@ public class MqttKafkaConfiguration extends Configuration static { final ConfigurationDef config = new ConfigurationDef("zilla.binding.mqtt.kafka"); - MESSAGES_TOPIC = config.property("messages.topic", "mqtt_messages"); - RETAINED_MESSAGES_TOPIC = config.property("retained.messages.topic", "mqtt_retained"); - SESSIONS_TOPIC = config.property("sessions.topic", "mqtt_sessions"); SESSION_ID = config.property(StringSupplier.class, "session.id", MqttKafkaConfiguration::decodeStringSupplier, MqttKafkaConfiguration::defaultSessionId); WILL_ID = config.property(StringSupplier.class, "will.id", diff --git 
a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java index 4cce257eeb..53b57260ff 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java @@ -17,7 +17,6 @@ import static java.util.stream.Collectors.toList; import java.util.List; -import java.util.Optional; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.stream.MqttKafkaSessionFactory; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.String16FW; @@ -38,9 +37,7 @@ public MqttKafkaBindingConfig( { this.id = binding.id; this.kind = binding.kind; - this.options = Optional.ofNullable(binding.options) - .map(MqttKafkaOptionsConfig.class::cast) - .orElse(MqttKafkaOptionsConfigAdapter.DEFAULT); + this.options = (MqttKafkaOptionsConfig) binding.options; this.routes = binding.routes.stream().map(MqttKafkaRouteConfig::new).collect(toList()); } diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java index a186c84c26..dbceb89053 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java @@ -32,15 +32,6 @@ public class MqttKafkaOptionsConfigAdapter implements OptionsConfigAdapterSpi, J private static 
final String MESSAGES_NAME = "messages"; private static final String RETAINED_NAME = "retained"; - private static final String16FW SESSIONS_DEFAULT = new String16FW("mqtt_sessions"); - private static final String16FW MESSAGES_DEFAULT = new String16FW("mqtt_messages"); - private static final String16FW RETAINED_DEFAULT = new String16FW("mqtt_retained"); - private static final MqttKafkaTopicsConfig TOPICS_DEFAULT = - new MqttKafkaTopicsConfig(SESSIONS_DEFAULT, MESSAGES_DEFAULT, RETAINED_DEFAULT); - - public static final MqttKafkaOptionsConfig DEFAULT = - new MqttKafkaOptionsConfig(TOPICS_DEFAULT); - @Override public Kind kind() { @@ -63,27 +54,23 @@ public JsonObject adaptToJson( MqttKafkaTopicsConfig topics = mqttKafkaOptions.topics; - if (topics != null && - !TOPICS_DEFAULT.equals(topics)) + if (topics != null) { JsonObjectBuilder newTopics = Json.createObjectBuilder(); String16FW sessions = topics.sessions; - if (sessions != null && - !(SESSIONS_DEFAULT.equals(sessions))) + if (sessions != null) { newTopics.add(SESSIONS_NAME, sessions.asString()); } String16FW messages = topics.messages; - if (messages != null && - !MESSAGES_DEFAULT.equals(messages)) + if (messages != null) { newTopics.add(MESSAGES_NAME, messages.asString()); } String16FW retained = topics.retained; - if (retained != null && - !RETAINED_DEFAULT.equals(retained)) + if (retained != null) { newTopics.add(RETAINED_NAME, retained.asString()); } @@ -98,34 +85,13 @@ public JsonObject adaptToJson( public OptionsConfig adaptFromJson( JsonObject object) { - MqttKafkaTopicsConfig newTopics = TOPICS_DEFAULT; - - if (object.containsKey(TOPICS_NAME)) - { - JsonObject topics = object.getJsonObject(TOPICS_NAME); - String16FW newSessions = SESSIONS_DEFAULT; + JsonObject topics = object.getJsonObject(TOPICS_NAME); - if (topics.containsKey(SESSIONS_NAME)) - { - newSessions = new String16FW(topics.getString(SESSIONS_NAME)); - } + String16FW newSessions = new String16FW(topics.getString(SESSIONS_NAME)); + String16FW 
newMessages = new String16FW(topics.getString(MESSAGES_NAME)); + String16FW newRetained = new String16FW(topics.getString(RETAINED_NAME)); - String16FW newMessages = MESSAGES_DEFAULT; - - if (topics.containsKey(MESSAGES_NAME)) - { - newMessages = new String16FW(topics.getString(MESSAGES_NAME)); - } - - String16FW newRetained = RETAINED_DEFAULT; - - if (topics.containsKey(RETAINED_NAME)) - { - newRetained = new String16FW(topics.getString(RETAINED_NAME)); - } - - newTopics = new MqttKafkaTopicsConfig(newSessions, newMessages, newRetained); - } + MqttKafkaTopicsConfig newTopics = new MqttKafkaTopicsConfig(newSessions, newMessages, newRetained); return new MqttKafkaOptionsConfig(newTopics); } diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java index cc7a4145c3..c962645286 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java @@ -17,8 +17,6 @@ import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.INSTANCE_ID; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.LIFETIME_ID; -import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.MESSAGES_TOPIC; -import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.RETAINED_MESSAGES_TOPIC; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.SESSION_ID; import static io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.TIME; import static 
io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.MqttKafkaConfiguration.WILL_AVAILABLE; @@ -30,8 +28,6 @@ public class MqttKafkaConfigurationTest { - public static final String MESSAGES_TOPIC_NAME = "zilla.binding.mqtt.kafka.messages.topic"; - public static final String RETAINED_MESSAGES_TOPIC_NAME = "zilla.binding.mqtt.kafka.retained.messages.topic"; public static final String TIME_NAME = "zilla.binding.mqtt.kafka.time"; public static final String WILL_AVAILABLE_NAME = "zilla.binding.mqtt.kafka.will.available"; public static final String WILL_STREAM_RECONNECT_DELAY_NAME = "zilla.binding.mqtt.kafka.will.stream.reconnect"; @@ -43,8 +39,6 @@ public class MqttKafkaConfigurationTest @Test public void shouldVerifyConstants() { - assertEquals(MESSAGES_TOPIC.name(), MESSAGES_TOPIC_NAME); - assertEquals(RETAINED_MESSAGES_TOPIC.name(), RETAINED_MESSAGES_TOPIC_NAME); assertEquals(TIME.name(), TIME_NAME); assertEquals(WILL_AVAILABLE.name(), WILL_AVAILABLE_NAME); assertEquals(WILL_STREAM_RECONNECT_DELAY.name(), WILL_STREAM_RECONNECT_DELAY_NAME); diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java index 48445b96ae..14e620e9ee 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java @@ -40,20 +40,6 @@ public void initJson() jsonb = JsonbBuilder.create(config); } - @Test - public void shouldReadOptionsWithDefaults() - { - String text = "{ }"; - - MqttKafkaOptionsConfig options = jsonb.fromJson(text, MqttKafkaOptionsConfig.class); - - assertThat(options, not(nullValue())); - 
assertThat(options.topics, not(nullValue())); - assertThat(options.topics.sessions.asString(), equalTo("mqtt_sessions")); - assertThat(options.topics.messages.asString(), equalTo("mqtt_messages")); - assertThat(options.topics.retained.asString(), equalTo("mqtt_retained")); - } - @Test public void shouldReadOptions() { From 6748741bc4949bca130b8a32c94621e97be4263f Mon Sep 17 00:00:00 2001 From: bmaidics Date: Sun, 17 Sep 2023 20:18:00 +0200 Subject: [PATCH 088/115] Serverref change (#422) --- .../binding/mqtt/kafka/config/proxy.yaml | 1 + .../kafka/schema/mqtt.kafka.schema.patch.json | 5 ++++ .../streams/mqtt/session.redirect/client.rpt | 1 - .../streams/mqtt/session.redirect/server.rpt | 1 - .../config/MqttKafkaOptionsConfig.java | 5 +++- .../config/MqttKafkaOptionsConfigAdapter.java | 9 ++++++- .../stream/MqttKafkaSessionFactory.java | 11 +++++---- .../MqttKafkaOptionsConfigAdapterTest.java | 5 +++- .../binding/mqtt/internal/MqttFunctions.java | 24 +------------------ .../main/resources/META-INF/zilla/mqtt.idl | 1 - .../client.rpt | 1 - .../server.rpt | 1 - .../client.rpt | 1 - .../server.rpt | 1 - .../mqtt/internal/MqttFunctionsTest.java | 6 +---- .../internal/stream/MqttServerFactory.java | 1 - .../command/log/internal/LoggableStream.java | 3 +-- 17 files changed, 31 insertions(+), 46 deletions(-) diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml index 42fa9338d1..1a9197f8c0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml @@ -20,6 +20,7 @@ bindings: type: mqtt-kafka kind: proxy options: + server: mqtt-1.example.com:1883 topics: sessions: mqtt-sessions messages: mqtt-messages diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json index 11b2a0e3f7..e49e47ccde 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json @@ -36,6 +36,11 @@ { "properties": { + "server": + { + "title": "Server Reference", + "type": "string" + }, "topics": { "title": "Topics", diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt index 14a46dcfcd..c38a0850d0 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt @@ -22,7 +22,6 @@ write zilla:begin.ext ${mqtt:beginEx() .session() .expiry(1) .clientId("client-1") - .serverRef("mqtt-1.example.com:1883") .build() .build()} diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt index bffc992ae2..7b9e265b8b 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt +++ 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt @@ -23,7 +23,6 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client-1") - .serverRef("mqtt-1.example.com:1883") .build() .build()} diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java index b2d8c55c28..6b4a3e5225 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java @@ -19,10 +19,13 @@ public class MqttKafkaOptionsConfig extends OptionsConfig { public final MqttKafkaTopicsConfig topics; + public final String serverRef; public MqttKafkaOptionsConfig( - MqttKafkaTopicsConfig topics) + MqttKafkaTopicsConfig topics, + String serverRef) { this.topics = topics; + this.serverRef = serverRef; } } diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java index dbceb89053..2b1b665593 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java @@ -28,6 +28,7 @@ public class MqttKafkaOptionsConfigAdapter implements OptionsConfigAdapterSpi, JsonbAdapter { private static final String 
TOPICS_NAME = "topics"; + private static final String SERVER_NAME = "server"; private static final String SESSIONS_NAME = "sessions"; private static final String MESSAGES_NAME = "messages"; private static final String RETAINED_NAME = "retained"; @@ -52,8 +53,13 @@ public JsonObject adaptToJson( JsonObjectBuilder object = Json.createObjectBuilder(); + String serverRef = mqttKafkaOptions.serverRef; MqttKafkaTopicsConfig topics = mqttKafkaOptions.topics; + if (serverRef != null) + { + object.add(SERVER_NAME, serverRef); + } if (topics != null) { JsonObjectBuilder newTopics = Json.createObjectBuilder(); @@ -86,6 +92,7 @@ public OptionsConfig adaptFromJson( JsonObject object) { JsonObject topics = object.getJsonObject(TOPICS_NAME); + String server = object.getString(SERVER_NAME, null); String16FW newSessions = new String16FW(topics.getString(SESSIONS_NAME)); String16FW newMessages = new String16FW(topics.getString(MESSAGES_NAME)); @@ -93,6 +100,6 @@ public OptionsConfig adaptFromJson( MqttKafkaTopicsConfig newTopics = new MqttKafkaTopicsConfig(newSessions, newMessages, newRetained); - return new MqttKafkaOptionsConfig(newTopics); + return new MqttKafkaOptionsConfig(newTopics, server); } } diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java index 2d172bec98..8ca0c591f5 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java @@ -197,6 +197,7 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final boolean willAvailable; private final int reconnectDelay; + private String serverRef; private int 
reconnectAttempt; public MqttKafkaSessionFactory( @@ -273,6 +274,7 @@ public void onAttached( long bindingId) { MqttKafkaBindingConfig binding = supplyBinding.apply(bindingId); + this.serverRef = binding.options.serverRef; if (willAvailable && coreIndex == 0) { Optional route = binding.routes.stream().findFirst(); @@ -324,7 +326,6 @@ private final class MqttSessionProxy private String16FW clientId; private String16FW clientIdMigrate; - private String serverRef; private int sessionExpiryMillis; private int sessionFlags; private int sessionPadding; @@ -416,7 +417,6 @@ private void onMqttBegin( sessionExpiryMillis = (int) SECONDS.toMillis(mqttSessionBeginEx.expiry()); sessionFlags = mqttSessionBeginEx.flags(); - serverRef = mqttSessionBeginEx.serverRef().asString(); if (!isSetWillFlag(sessionFlags) || isSetCleanStart(sessionFlags)) { @@ -2618,7 +2618,7 @@ protected void doKafkaBegin(long traceId, long authorization, long affinity) kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization, affinity, delegate.sessionsTopic, null, delegate.clientIdMigrate, - delegate.sessionId, delegate.serverRef, KafkaCapabilities.PRODUCE_AND_FETCH); + delegate.sessionId, serverRef, KafkaCapabilities.PRODUCE_AND_FETCH); } @Override @@ -2703,7 +2703,7 @@ protected void doKafkaBegin( KafkaCapabilities.PRODUCE_ONLY : KafkaCapabilities.PRODUCE_AND_FETCH; kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization, affinity, delegate.sessionsTopic, delegate.clientId, delegate.clientIdMigrate, - delegate.sessionId, delegate.serverRef, capabilities); + delegate.sessionId, serverRef, capabilities); } @Override @@ -2879,7 +2879,7 @@ protected void doKafkaBegin( state = MqttKafkaState.openingInitial(state); kafka = newKafkaStream(super::onKafkaMessage, originId, routedId, initialId, initialSeq, initialAck, initialMax, - traceId, authorization, 
affinity, delegate.sessionsTopic, delegate.clientId, delegate.serverRef); + traceId, authorization, affinity, delegate.sessionsTopic, delegate.clientId, serverRef); } } @@ -3701,6 +3701,7 @@ private MessageConsumer newSignalStream( m.capabilities(c -> c.set(KafkaCapabilities.PRODUCE_AND_FETCH)) .topic(sessionsTopicName) .groupId(MQTT_CLIENTS_GROUP_ID) + .consumerId(serverRef) .filtersItem(f -> f.conditionsItem(c -> c.header(h -> h.nameLen(TYPE_HEADER_NAME_OCTETS.sizeof()) diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java index 14e620e9ee..edfdce43e3 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java @@ -45,6 +45,7 @@ public void shouldReadOptions() { String text = "{" + + "\"server\":\"mqtt-1.example.com:1883\"," + "\"topics\":" + "{" + "\"sessions\":\"sessions\"," + @@ -60,6 +61,7 @@ public void shouldReadOptions() assertThat(options.topics.sessions.asString(), equalTo("sessions")); assertThat(options.topics.messages.asString(), equalTo("messages")); assertThat(options.topics.retained.asString(), equalTo("retained")); + assertThat(options.serverRef, equalTo("mqtt-1.example.com:1883")); } @Test @@ -69,13 +71,14 @@ public void shouldWriteOptions() new MqttKafkaTopicsConfig( new String16FW("sessions"), new String16FW("messages"), - new String16FW("retained"))); + new String16FW("retained")), "mqtt-1.example.com:1883"); String text = jsonb.toJson(options); assertThat(text, not(nullValue())); assertThat(text, equalTo( "{" + + "\"server\":\"mqtt-1.example.com:1883\"," + 
"\"topics\":" + "{" + "\"sessions\":\"sessions\"," + diff --git a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java index dc8c6f553b..cf0d647f66 100644 --- a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java +++ b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java @@ -221,13 +221,6 @@ public MqttSessionBeginExBuilder expiry( return this; } - public MqttSessionBeginExBuilder serverRef( - String serverRef) - { - sessionBeginExRW.serverRef(serverRef); - return this; - } - public MqttSessionBeginExBuilder flags( String... flagNames) { @@ -1279,7 +1272,6 @@ private boolean matchFilters( public final class MqttSessionBeginExMatcherBuilder { private String16FW clientId; - private String16FW serverRef; private Integer expiry; private Integer flags; @@ -1301,13 +1293,6 @@ public MqttSessionBeginExMatcherBuilder expiry( return this; } - public MqttSessionBeginExMatcherBuilder serverRef( - String serverRef) - { - this.serverRef = new String16FW(serverRef); - return this; - } - public MqttSessionBeginExMatcherBuilder flags( String... 
flagNames) { @@ -1328,8 +1313,7 @@ private boolean match( final MqttSessionBeginExFW sessionBeginEx = beginEx.session(); return matchClientId(sessionBeginEx) && matchExpiry(sessionBeginEx) && - matchFlags(sessionBeginEx) && - matchserverRef(sessionBeginEx); + matchFlags(sessionBeginEx); } private boolean matchClientId( @@ -1349,12 +1333,6 @@ private boolean matchFlags( { return flags == null || flags == sessionBeginEx.flags(); } - - private boolean matchserverRef( - final MqttSessionBeginExFW sessionBeginEx) - { - return serverRef == null || serverRef.equals(sessionBeginEx.serverRef()); - } } } diff --git a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl index 0a2ac23134..711e3cb745 100644 --- a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl +++ b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl @@ -153,7 +153,6 @@ scope mqtt uint8 flags = 0; int32 expiry = 0; string16 clientId; - string16 serverRef = null; } struct MqttSubscribeBeginEx diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt index 3115255b0b..19ae2a4dc7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt @@ -22,7 +22,6 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client") - .serverRef("mqtt-1.example.com:1883") .build() .build()} diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt index c0ec456d5e..dec7e5846d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt @@ -24,7 +24,6 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client") - .serverRef("mqtt-1.example.com:1883") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt index 209a8946c2..20b8a30d30 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt @@ -22,7 +22,6 @@ write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client") - .serverRef("mqtt-1.example.com:1883") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt 
index b069bf351c..f64d01cffc 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt @@ -24,7 +24,6 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() .clientId("client") - .serverRef("mqtt-1.example.com:1883") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java index c118e4530d..6cc86dcb42 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java @@ -65,7 +65,6 @@ public void shouldEncodeMqttSessionBeginExt() .flags("WILL", "CLEAN_START") .expiry(30) .clientId("client") - .serverRef("mqtt-1.example.com:1883") .build() .build(); @@ -74,7 +73,6 @@ public void shouldEncodeMqttSessionBeginExt() assertEquals(2, mqttBeginEx.kind()); assertEquals("client", mqttBeginEx.session().clientId().asString()); - assertEquals("mqtt-1.example.com:1883", mqttBeginEx.session().serverRef().asString()); assertEquals(30, mqttBeginEx.session().expiry()); assertEquals(6, mqttBeginEx.session().flags()); } @@ -287,7 +285,6 @@ public void shouldMatchSessionBeginExtension() throws Exception .flags("CLEAN_START") .expiry(10) .clientId("client") - .serverRef("mqtt-1.example.com:1883") .build() .build(); @@ -299,8 +296,7 @@ public void shouldMatchSessionBeginExtension() throws Exception .session(s -> s .flags(2) .expiry(10) - .clientId("client") - .serverRef("mqtt-1.example.com:1883")) + .clientId("client")) .build(); 
assertNotNull(matcher.match(byteBuf)); diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 1c8a2969b1..ab7f4a0df9 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -1838,7 +1838,6 @@ private void resolveSession( .flags(flags) .expiry(sessionExpiry) .clientId(clientId) - .serverRef(serverRef) ); if (sessionStream == null) diff --git a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java index a981f005f6..9a5822a5b7 100644 --- a/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java +++ b/incubator/command-log/src/main/java/io/aklivity/zilla/runtime/command/log/internal/LoggableStream.java @@ -1351,10 +1351,9 @@ private void onMqttSessionBeginEx( { final String clientId = session.clientId().asString(); final int expiry = session.expiry(); - final String serverRef = session.serverRef().asString(); out.printf(verboseFormat, index, offset, timestamp, - format("[session] %s %d %s", clientId, expiry, serverRef)); + format("[session] %s %d", clientId, expiry)); } private void onMqttDataEx( From d72e19ef5840e39a2af1dad6e5e3f88c1720f9d8 Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Sun, 17 Sep 2023 14:41:13 -0700 Subject: [PATCH 089/115] Fix flow control bug (#423) --- .../grpc/internal/stream/KafkaGrpcRemoteServerFactory.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git 
a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcRemoteServerFactory.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcRemoteServerFactory.java index 16bce0db88..1d1cd28f75 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcRemoteServerFactory.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcRemoteServerFactory.java @@ -69,6 +69,7 @@ public final class KafkaGrpcRemoteServerFactory implements KafkaGrpcStreamFactor private static final String KAFKA_TYPE_NAME = "kafka"; private static final int SIGNAL_INITIATE_KAFKA_STREAM = 1; + private static final int GRPC_QUEUE_MESSAGE_PADDING = 3 * 256 + 33; private static final int DATA_FLAG_COMPLETE = 0x03; private static final int DATA_FLAG_INIT = 0x02; @@ -260,7 +261,7 @@ private KafkaRemoteServer( this.replyAck = 0; this.replyMax = bufferPool.slotCapacity(); this.replyBud = 0; - this.replyPad = 0; + this.replyPad = GRPC_QUEUE_MESSAGE_PADDING; this.replyCap = 0; this.errorProducer = new KafkaErrorProducer(originId, routedId, condition, this); this.grpcClients = new Object2ObjectHashMap<>(); @@ -494,7 +495,7 @@ private void flushGrpcMessagesIfBuffered( final int queuedMessageSize = queueMessage.sizeof(); final int oldProgressOffset = progressOffset; - progressOffset += queuedMessageSize; + progressOffset = queueMessage.limit(); if (correlationId.equals(messageCorrelationId)) { @@ -510,7 +511,7 @@ private void flushGrpcMessagesIfBuffered( final int remaining = grpcQueueSlotOffset - progressOffset; grpcQueueBuffer.putBytes(oldProgressOffset, grpcQueueBuffer, progressOffset, remaining); - grpcQueueSlotOffset = grpcQueueSlotOffset - progressOffset; + grpcQueueSlotOffset = oldProgressOffset + remaining; progressOffset = oldProgressOffset; } else if (progress 
> 0) From b19af9e2bcc295db9e7272fd6c5c127ad6a35ae8 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Mon, 18 Sep 2023 01:03:18 +0200 Subject: [PATCH 090/115] Buffer fragmented kafka session signal messages (#424) --- .../session.cancel.session.expiry/client.rpt | 4 +- .../session.cancel.session.expiry/server.rpt | 4 +- .../client.rpt | 66 ++++++++++ .../server.rpt | 70 +++++++++++ .../stream/MqttKafkaSessionFactory.java | 115 ++++++++++++------ .../stream/MqttKafkaSessionProxyIT.java | 9 ++ 6 files changed, 227 insertions(+), 41 deletions(-) create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/client.rpt create mode 100644 incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt index 7280c56980..a5e991862a 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt @@ -39,8 +39,8 @@ read zilla:data.ext ${kafka:matchDataEx() .merged() .deferred(0) .partition(-1, -1) - .key("client-1#will-signal") - .header("type", "will-signal") + .key("client-1#expiry-signal") + .header("type", "expiry-signal") .build() .build()} read ${mqtt:sessionSignal() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt 
b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt index 9f3362b0c9..0973d7b763 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt @@ -45,8 +45,8 @@ write zilla:data.ext ${kafka:dataEx() .merged() .deferred(0) .partition(-1, -1) - .key("client-1#will-signal") - .header("type", "will-signal") + .key("client-1#expiry-signal") + .header("type", "expiry-signal") .build() .build()} write ${mqtt:sessionSignal() diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/client.rpt new file mode 100644 index 0000000000..a5e991862a --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/client.rpt @@ -0,0 +1,66 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +connect "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .filter() + .header("type", "expiry-signal") + .build() + .build() + .build()} + +connected + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .header("type", "expiry-signal") + .build() + .build()} +read ${mqtt:sessionSignal() + .expiry() + .instanceId("zilla-1") + .clientId("client-1") + .delay(2000) + .expireAt(expireAt) + .build() + .build()} + +read zilla:data.ext ${kafka:matchDataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +read zilla:data.null + diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/server.rpt new file mode 100644 index 0000000000..92d6b90b1f --- /dev/null +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/server.rpt @@ -0,0 +1,70 @@ +# +# Copyright 2021-2023 Aklivity Inc +# +# Licensed under the Aklivity Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# https://www.aklivity.io/aklivity-community-license/ +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +property delayMillis 2000L +property expireAt ${mqtt:timestamp() + delayMillis} + +accept "zilla://streams/kafka0" + option zilla:window 8192 + option zilla:transmission "duplex" + + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .merged() + .capabilities("PRODUCE_AND_FETCH") + .topic("mqtt-sessions") + .groupId("mqtt-clients") + .filter() + .header("type", "will-signal") + .build() + .filter() + .header("type", "expiry-signal") + .build() + .build() + .build()} + +connected + +write option zilla:flags "init" +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .header("type", "expiry-signal") + .build() + .build()} +write [0x01 0x07 0x00 0x7a 0x69 0x6c 0x6c 0x61 0x2d 0x31 0x08 0x00 0x63 0x6c 0x69 0x65 0x6e 0x74 0x2d] +write flush + +write option zilla:flags "fin" +write [0x31 0xd0 0x07 0x00 0x00 0xbe 0x35 0x1a 0xa5 0x8a 0x01 0x00 0x00] +write flush + +write zilla:data.ext ${kafka:dataEx() + .typeId(zilla:id("kafka")) + .merged() + .deferred(0) + .partition(-1, -1) + .key("client-1#expiry-signal") + .hashKey("client-1") + .header("type", "expiry-signal") + .build() + .build()} +write flush diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java index 8ca0c591f5..d5f4c8ccda 100644 --- 
a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java @@ -118,6 +118,8 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private static final OctetsFW EXPIRY_SIGNAL_NAME_OCTETS = new OctetsFW().wrap(EXPIRY_SIGNAL_NAME.value(), 0, EXPIRY_SIGNAL_NAME.length()); private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0); + private static final int DATA_FLAG_INIT = 0x02; + private static final int DATA_FLAG_FIN = 0x01; private static final int DATA_FLAG_COMPLETE = 0x03; public static final String MQTT_CLIENTS_GROUP_ID = "mqtt-clients"; private static final int SIGNAL_DELIVER_WILL_MESSAGE = 1; @@ -912,6 +914,8 @@ public final class KafkaSignalStream private long replyAck; private int replyMax; private long reconnectAt; + private int decodeSlot = NO_SLOT; + private int decodeSlotOffset; private KafkaSignalStream( long originId, @@ -1105,6 +1109,7 @@ private void onKafkaData( { final OctetsFW extension = data.extension(); final OctetsFW payload = data.payload(); + final int flags = data.flags(); final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); final KafkaDataExFW kafkaDataEx = dataEx != null && dataEx.typeId() == kafkaTypeId ? extension.get(kafkaDataExRO::tryWrap) : null; @@ -1113,9 +1118,8 @@ private void onKafkaData( final KafkaKeyFW key = kafkaMergedDataEx != null ? 
kafkaMergedDataEx.key() : null; reactToSignal: - if (key != null) { - if (payload == null) + if (key != null && payload == null && (flags & DATA_FLAG_FIN) != 0x00) { final OctetsFW type = kafkaMergedDataEx.headers() .matchFirst(h -> h.name().equals(TYPE_HEADER_NAME_OCTETS)).value(); @@ -1144,52 +1148,89 @@ else if (type.equals(EXPIRY_SIGNAL_NAME_OCTETS) && sessionExpiryIds.containsKey( break reactToSignal; } - final MqttSessionSignalFW sessionSignal = - mqttSessionSignalRO.wrap(payload.buffer(), payload.offset(), payload.limit()); + DirectBuffer buffer = payload.buffer(); + int offset = payload.offset(); + int limit = payload.limit(); + int length = limit - offset; - switch (sessionSignal.kind()) + if ((flags & DATA_FLAG_FIN) == 0x00) { - case MqttSessionSignalFW.KIND_WILL: - final MqttWillSignalFW willSignal = sessionSignal.will(); - long deliverAt = willSignal.deliverAt(); - final String16FW willClientId = willSignal.clientId(); + if (decodeSlot == NO_SLOT) + { + decodeSlot = bufferPool.acquire(replyId); + assert decodeSlotOffset == 0; + } - if (deliverAt == MqttTime.UNKNOWN.value()) + final MutableDirectBuffer slotBuffer = bufferPool.buffer(decodeSlot); + slotBuffer.putBytes(decodeSlotOffset, buffer, offset, length); + decodeSlotOffset += length; + } + else + { + if (decodeSlot != NO_SLOT) { - if (instanceId.instanceId().equals(willSignal.instanceId())) - { - break reactToSignal; - } - deliverAt = supplyTime.getAsLong() + willSignal.delay(); + final MutableDirectBuffer slotBuffer = bufferPool.buffer(decodeSlot); + slotBuffer.putBytes(decodeSlotOffset, buffer, offset, length); + buffer = slotBuffer; + offset = 0; + limit = decodeSlotOffset + length; } - KafkaFetchWillStream willFetcher = - new KafkaFetchWillStream(originId, routedId, this, sessionsTopic, willClientId, - willSignal.willId().asString(), willSignal.lifetimeId().asString(), deliverAt); - willFetcher.doKafkaBegin(traceId, authorization, 0, willSignal.lifetimeId()); - willFetchers.put(new 
String16FW(willClientId.asString()), willFetcher); - break; - case MqttSessionSignalFW.KIND_EXPIRY: - final MqttExpirySignalFW expirySignal = sessionSignal.expiry(); - long expireAt = expirySignal.expireAt(); - final String16FW expiryClientId = expirySignal.clientId(); - - if (expireAt == MqttTime.UNKNOWN.value()) + final MqttSessionSignalFW sessionSignal = + mqttSessionSignalRO.wrap(buffer, offset, limit); + byte[] bytes = new byte[sessionSignal.sizeof()]; + + switch (sessionSignal.kind()) { - if (instanceId.instanceId().equals(expirySignal.instanceId())) + case MqttSessionSignalFW.KIND_WILL: + final MqttWillSignalFW willSignal = sessionSignal.will(); + long deliverAt = willSignal.deliverAt(); + final String16FW willClientId = willSignal.clientId(); + + if (deliverAt == MqttTime.UNKNOWN.value()) { - break reactToSignal; + if (instanceId.instanceId().equals(willSignal.instanceId())) + { + break reactToSignal; + } + deliverAt = supplyTime.getAsLong() + willSignal.delay(); + } + + KafkaFetchWillStream willFetcher = + new KafkaFetchWillStream(originId, routedId, this, sessionsTopic, willClientId, + willSignal.willId().asString(), willSignal.lifetimeId().asString(), deliverAt); + willFetcher.doKafkaBegin(traceId, authorization, 0, willSignal.lifetimeId()); + willFetchers.put(new String16FW(willClientId.asString()), willFetcher); + break; + case MqttSessionSignalFW.KIND_EXPIRY: + final MqttExpirySignalFW expirySignal = sessionSignal.expiry(); + long expireAt = expirySignal.expireAt(); + final String16FW expiryClientId = expirySignal.clientId(); + + if (expireAt == MqttTime.UNKNOWN.value()) + { + if (instanceId.instanceId().equals(expirySignal.instanceId())) + { + break reactToSignal; + } + expireAt = supplyTime.getAsLong() + expirySignal.delay(); } - expireAt = supplyTime.getAsLong() + expirySignal.delay(); - } - final int contextId = CONTEXT_COUNTER.incrementAndGet(); - expiryClientIds.put(contextId, expiryClientId); + final int contextId = 
CONTEXT_COUNTER.incrementAndGet(); + expiryClientIds.put(contextId, expiryClientId); - final long signalId = - signaler.signalAt(expireAt, originId, routedId, initialId, SIGNAL_EXPIRE_SESSION, contextId); - sessionExpiryIds.put(expiryClientId, signalId); - break; + final long signalId = + signaler.signalAt(expireAt, originId, routedId, initialId, SIGNAL_EXPIRE_SESSION, contextId); + sessionExpiryIds.put(expiryClientId, signalId); + break; + } + + if (decodeSlot != NO_SLOT) + { + bufferPool.release(decodeSlot); + decodeSlot = NO_SLOT; + decodeSlotOffset = 0; + } } } } diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java index cf7312a8aa..72ce0cd198 100644 --- a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java +++ b/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java @@ -245,6 +245,15 @@ public void shouldCancelSessionExpiry() throws Exception k3po.finish(); } + @Test + @Configuration("proxy.yaml") + @Specification({ + "${kafka}/session.session.expiry.fragmented/server"}) + public void shouldDecodeSessionExpirySignalFragmented() throws Exception + { + k3po.finish(); + } + @Test @Configuration("proxy.yaml") @Specification({ From 4437f2a187b627bfe430b89080a7ac3a27f95c8e Mon Sep 17 00:00:00 2001 From: bmaidics Date: Mon, 18 Sep 2023 18:28:40 +0200 Subject: [PATCH 091/115] Enhance mqtt binding configuration syntax (#425) --- .../main/resources/META-INF/zilla/mqtt.idl | 7 -- ...ons.yaml => server.route.non.default.yaml} | 13 +- .../server.when.topic.publish.only.yaml | 27 ----- .../server.when.topic.subscribe.only.yaml | 27 ----- .../mqtt/config/server.when.topic.yaml | 26 ---- 
.../mqtt/schema/mqtt.schema.patch.json | 43 +++++-- .../client.rpt | 105 ++++++++++++++++ .../server.rpt | 100 ++++++++++++++++ .../client.rpt | 58 +++++++++ .../server.rpt | 59 +++++++++ .../specs/binding/mqtt/config/SchemaTest.java | 28 +---- .../mqtt/streams/application/SessionIT.java | 9 ++ .../mqtt/streams/network/SessionIT.java | 9 ++ .../mqtt/config/MqttConditionConfig.java | 17 +-- .../config/MqttConditionConfigBuilder.java | 32 +++-- .../mqtt/config/MqttPublishConfig.java | 27 +++++ .../mqtt/config/MqttSessionConfig.java | 27 +++++ .../mqtt/config/MqttSubscribeConfig.java | 27 +++++ .../internal/config/MqttBindingConfig.java | 24 ++-- .../config/MqttConditionConfigAdapter.java | 80 +++++++++++-- .../internal/config/MqttConditionMatcher.java | 113 ++++++++++++++---- .../mqtt/internal/config/MqttRouteConfig.java | 20 ++-- .../internal/stream/MqttServerFactory.java | 21 ++-- .../MqttConditionConfigAdapterTest.java | 51 ++++++-- .../mqtt/internal/stream/SessionIT.java | 13 ++ 25 files changed, 750 insertions(+), 213 deletions(-) rename incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/{server.when.sessions.yaml => server.route.non.default.yaml} (73%) delete mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.topic.publish.only.yaml delete mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.topic.subscribe.only.yaml delete mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.topic.yaml create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt create mode 100644 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/server.rpt create mode 100644 incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfig.java create mode 100644 incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSessionConfig.java create mode 100644 incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfig.java diff --git a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl index 711e3cb745..7f7395edf5 100644 --- a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl +++ b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl @@ -41,13 +41,6 @@ scope mqtt RETAIN(0) } - enum MqttCapabilities (uint8) - { - PUBLISH_ONLY(1), - SUBSCRIBE_ONLY(2), - SESSION(3) - } - enum MqttPayloadFormat { BINARY, diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.sessions.yaml b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.route.non.default.yaml similarity index 73% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.sessions.yaml rename to incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.route.non.default.yaml index 1b38a58862..38e16f2c5c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.sessions.yaml +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.route.non.default.yaml @@ -21,6 +21,13 @@ bindings: type: mqtt kind: server routes: - - exit: app0 - when: - - capabilities: session + - when: + - session: + - client-id: "*" + - publish: + - topic: command/one + - topic: command/two + - subscribe: + - topic: reply + exit: app1 + exit: app0 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.topic.publish.only.yaml b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.topic.publish.only.yaml deleted file mode 100644 index 2d55dc5836..0000000000 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.topic.publish.only.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# -# Copyright 2021-2023 Aklivity Inc. -# -# Aklivity licenses this file to you under the Apache License, -# version 2.0 (the "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - ---- -name: test -bindings: - net0: - type: mqtt - kind: server - routes: - - exit: app0 - when: - - topic: sensor/one - capabilities: publish diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.topic.subscribe.only.yaml b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.topic.subscribe.only.yaml deleted file mode 100644 index b6ab15bf65..0000000000 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.topic.subscribe.only.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# -# Copyright 2021-2023 Aklivity Inc. -# -# Aklivity licenses this file to you under the Apache License, -# version 2.0 (the "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - ---- -name: test -bindings: - net0: - type: mqtt - kind: server - routes: - - exit: app0 - when: - - topic: sensor/one - capabilities: subscribe diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.topic.yaml b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.topic.yaml deleted file mode 100644 index 765e5b4c09..0000000000 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.when.topic.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# -# Copyright 2021-2023 Aklivity Inc. 
-# -# Aklivity licenses this file to you under the Apache License, -# version 2.0 (the "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - ---- -name: test -bindings: - net0: - type: mqtt - kind: server - routes: - - exit: app0 - when: - - topic: sensor/one diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json index f574e92425..c20d1c71e6 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json @@ -125,17 +125,44 @@ "additionalProperties": false, "properties": { - "topic": + "session": { - "title": "Topic", - "type": "string" + "title": "Session", + "type": "array", + "items": + { + "client-id": + { + "title": "Client Id", + "type": "string" + } + } }, - "capabilities": + "subscribe": { - "title": "Capabilities", - "type": "string", - "enum": [ "session", "publish", "subscribe", "publish_and_subscribe" ], - "default": "publish_and_subscribe" + "title": "Subscribe", + "type": "array", + "items": + { + "topic": + { + "title": "Topic", + "type": "string" + } + } + }, + "publish": + { + "title": "Subscribe", + "type": "array", + "items": + { + "topic": + { + "title": "Topic", + "type": "string" + } + } } } } diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt new file mode 100644 index 0000000000..6930722b7f --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt @@ -0,0 +1,105 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("reply", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("reply", 1) + .build()} + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("reply", 1) + .build() + .build()} + +connected + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("command/one") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .build() + .build()} +write "message1" +write flush + + +connect "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("command/two") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .build() + .build()} +write "message2" +write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt new file mode 100644 index 0000000000..90796c2368 --- /dev/null +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt @@ -0,0 +1,100 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/app1" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("reply", 1) + .build()} + +write ${mqtt:session() + .subscription("reply", 1) + .build()} +write flush + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("reply", 1) + .build() + .build()} + +connected + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("command/one") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .publish() + .build() + .build()} +read "message1" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("command/two") + .build() + 
.build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .publish() + .build() + .build()} +read "message2" + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/client.rpt new file mode 100644 index 0000000000..a925e86251 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/client.rpt @@ -0,0 +1,58 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 + +write [0x82 0x0d] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x05] "reply" # topic filter + [0x20] # options = at-most-once + +read [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +write [0x30 0x16] # PUBLISH + [0x00 0x0b] "command/one" # topic name + [0x00] # properties + "message1" # payload + +write [0x30 0x16] # PUBLISH + [0x00 0x0b] "command/two" # topic name + [0x00] # properties + "message2" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/server.rpt new file mode 100644 index 0000000000..a430d57395 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/server.rpt @@ -0,0 +1,59 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 + +read [0x82 0x0d] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x05] "reply" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +read [0x30 0x16] # PUBLISH + [0x00 0x0b] "command/one" # topic name + [0x00] # properties + "message1" # payload + +read [0x30 0x16] # PUBLISH + [0x00 0x0b] "command/two" # topic name + [0x00] # properties + "message2" # payload diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java index 2021142ca5..c1d97f1a32 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java @@ -97,33 +97,9 @@ public void 
shouldValidateServerWithAuthorizationOptions() } @Test - public void shouldValidateServerWhenTopic() + public void shouldValidateServerWhenRouteToNonDefault() { - JsonObject config = schema.validate("server.when.topic.yaml"); - - assertThat(config, not(nullValue())); - } - - @Test - public void shouldValidateServerWhenTopicOrSessions() - { - JsonObject config = schema.validate("server.when.sessions.yaml"); - - assertThat(config, not(nullValue())); - } - - @Test - public void shouldValidateServerWhenTopicPublishOnly() - { - JsonObject config = schema.validate("server.when.topic.publish.only.yaml"); - - assertThat(config, not(nullValue())); - } - - @Test - public void shouldValidateServerWhenTopicSubscribeOnly() - { - JsonObject config = schema.validate("server.when.topic.subscribe.only.yaml"); + JsonObject config = schema.validate("server.route.non.default.yaml"); assertThat(config, not(nullValue())); } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java index 0b920583bc..61e8b7ce38 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java @@ -181,4 +181,13 @@ public void shouldSubscribeMultipleSaveSubscriptionsInSession() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${app}/session.subscribe.publish.routing/client", + "${app}/session.subscribe.publish.routing/server"}) + public void shouldSubscribeAndPublishToNonDefaultRoute() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java 
b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java index e80e349914..5862d0c27b 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java @@ -182,4 +182,13 @@ public void shouldSubscribeMultipleSaveSubscriptionsInSession() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${net}/session.subscribe.publish.routing/client", + "${net}/session.subscribe.publish.routing/server"}) + public void shouldSubscribeAndPublishToNonDefaultRoute() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java index f8b8b45575..7ce8a7fdcd 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java @@ -15,15 +15,16 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.config; +import java.util.List; import java.util.function.Function; -import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; public final class MqttConditionConfig extends ConditionConfig { - public final String topic; - public final MqttCapabilities capabilities; + public final List sessions; + public final List subscribes; + public final List publishes; public static MqttConditionConfigBuilder builder() { @@ -37,11 +38,13 @@ public static MqttConditionConfigBuilder builder( } MqttConditionConfig( - String topic, - MqttCapabilities capabilities) + List sessions, + List subscribes, + List publishes) 
{ - this.topic = topic; - this.capabilities = capabilities; + this.sessions = sessions; + this.subscribes = subscribes; + this.publishes = publishes; } } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java index a9db039b3d..00276b09ec 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java @@ -15,9 +15,10 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.config; +import java.util.ArrayList; +import java.util.List; import java.util.function.Function; -import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; @@ -25,13 +26,17 @@ public final class MqttConditionConfigBuilder extends ConfigBuilder mapper; - private String topic; - private MqttCapabilities capabilities; + private final List session; + private final List subscribe; + private final List publish; MqttConditionConfigBuilder( Function mapper) { this.mapper = mapper; + this.session = new ArrayList<>(); + this.subscribe = new ArrayList<>(); + this.publish = new ArrayList<>(); } @Override @@ -41,23 +46,30 @@ protected Class> thisType() return (Class>) getClass(); } - public MqttConditionConfigBuilder topic( - String topic) + public MqttConditionConfigBuilder session( + MqttSessionConfig session) { - this.topic = topic; + this.session.add(session); return this; } - public MqttConditionConfigBuilder capabilities( - MqttCapabilities capabilities) + public MqttConditionConfigBuilder subscribe( + MqttSubscribeConfig subscribe) { - this.capabilities = capabilities; + 
this.subscribe.add(subscribe); + return this; + } + + public MqttConditionConfigBuilder publish( + MqttPublishConfig publish) + { + this.publish.add(publish); return this; } @Override public T build() { - return mapper.apply(new MqttConditionConfig(topic, capabilities)); + return mapper.apply(new MqttConditionConfig(session, subscribe, publish)); } } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfig.java new file mode 100644 index 0000000000..5168244ffb --- /dev/null +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfig.java @@ -0,0 +1,27 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.mqtt.config; + +public class MqttPublishConfig +{ + public final String topic; + + public MqttPublishConfig( + String topic) + { + this.topic = topic; + } +} diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSessionConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSessionConfig.java new file mode 100644 index 0000000000..2f8ed7c73f --- /dev/null +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSessionConfig.java @@ -0,0 +1,27 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.config; + +public class MqttSessionConfig +{ + public final String clientId; + + public MqttSessionConfig( + String clientId) + { + this.clientId = clientId; + } +} diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfig.java new file mode 100644 index 0000000000..909cd75929 --- /dev/null +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfig.java @@ -0,0 +1,27 @@ +/* + * Copyright 2021-2023 Aklivity Inc. 
+ * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.config; + +public class MqttSubscribeConfig +{ + public final String topic; + + public MqttSubscribeConfig( + String topic) + { + this.topic = topic; + } +} diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java index 0d5ef78b8d..529623c069 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java @@ -26,7 +26,6 @@ import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttAuthorizationConfig.MqttConnectProperty; import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttAuthorizationConfig.MqttCredentialsConfig; import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttAuthorizationConfig.MqttPatternConfig; -import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; import io.aklivity.zilla.runtime.engine.config.BindingConfig; import io.aklivity.zilla.runtime.engine.config.KindConfig; @@ -64,23 +63,32 @@ public MqttRouteConfig resolve( .orElse(null); } - public MqttRouteConfig resolve( + public MqttRouteConfig 
resolveSession( long authorization, - MqttCapabilities capabilities) + String clientId) { return routes.stream() - .filter(r -> r.authorized(authorization) && r.matches(capabilities)) + .filter(r -> r.authorized(authorization) && r.matchesSession(clientId)) .findFirst() .orElse(null); } - public MqttRouteConfig resolve( + public MqttRouteConfig resolveSubscribe( + long authorization, + String topic) + { + return routes.stream() + .filter(r -> r.authorized(authorization) && r.matchesSubscribe(topic)) + .findFirst() + .orElse(null); + } + + public MqttRouteConfig resolvePublish( long authorization, - String topic, - MqttCapabilities capabilities) + String topic) { return routes.stream() - .filter(r -> r.authorized(authorization) && r.matches(topic, capabilities)) + .filter(r -> r.authorized(authorization) && r.matchesPublish(topic)) .findFirst() .orElse(null); } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java index c9464a035a..08eb3a7b94 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java @@ -16,21 +16,29 @@ package io.aklivity.zilla.runtime.binding.mqtt.internal.config; import jakarta.json.Json; +import jakarta.json.JsonArray; +import jakarta.json.JsonArrayBuilder; import jakarta.json.JsonObject; import jakarta.json.JsonObjectBuilder; import jakarta.json.bind.adapter.JsonbAdapter; import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfig; import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfigBuilder; +import io.aklivity.zilla.runtime.binding.mqtt.config.MqttPublishConfig; +import 
io.aklivity.zilla.runtime.binding.mqtt.config.MqttSessionConfig; +import io.aklivity.zilla.runtime.binding.mqtt.config.MqttSubscribeConfig; import io.aklivity.zilla.runtime.binding.mqtt.internal.MqttBinding; -import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; import io.aklivity.zilla.runtime.engine.config.ConditionConfig; import io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi; public final class MqttConditionConfigAdapter implements ConditionConfigAdapterSpi, JsonbAdapter { + private static final String SESSION_NAME = "session"; + private static final String SUBSCRIBE_NAME = "subscribe"; + private static final String PUBLISH_NAME = "publish"; + private static final String CLIENT_ID_NAME = "client-id"; private static final String TOPIC_NAME = "topic"; - private static final String CAPABILITIES_NAME = "capabilities"; + public static final String CLIENT_ID_DEFAULT = "*"; @Override public String type() @@ -46,14 +54,43 @@ public JsonObject adaptToJson( JsonObjectBuilder object = Json.createObjectBuilder(); - if (mqttCondition.topic != null) + if (!mqttCondition.sessions.isEmpty()) { - object.add(TOPIC_NAME, mqttCondition.topic); + JsonArrayBuilder sessions = Json.createArrayBuilder(); + + mqttCondition.sessions.forEach(p -> + { + JsonObjectBuilder sessionJson = Json.createObjectBuilder(); + sessionJson.add(CLIENT_ID_NAME, p.clientId); + sessions.add(sessionJson); + }); + object.add(SESSION_NAME, sessions); } - if (mqttCondition.capabilities != null) + if (!mqttCondition.subscribes.isEmpty()) { - object.add(CAPABILITIES_NAME, mqttCondition.capabilities.toString().toLowerCase()); + JsonArrayBuilder subscribes = Json.createArrayBuilder(); + + mqttCondition.subscribes.forEach(s -> + { + JsonObjectBuilder subscribeJson = Json.createObjectBuilder(); + subscribeJson.add(TOPIC_NAME, s.topic); + subscribes.add(subscribeJson); + }); + object.add(SUBSCRIBE_NAME, subscribes); + } + + if (!mqttCondition.publishes.isEmpty()) + { + 
JsonArrayBuilder publishes = Json.createArrayBuilder(); + + mqttCondition.publishes.forEach(p -> + { + JsonObjectBuilder publishJson = Json.createObjectBuilder(); + publishJson.add(TOPIC_NAME, p.topic); + publishes.add(publishJson); + }); + object.add(PUBLISH_NAME, publishes); } return object.build(); @@ -65,14 +102,37 @@ public ConditionConfig adaptFromJson( { MqttConditionConfigBuilder mqttConfig = MqttConditionConfig.builder(); - if (object.containsKey(TOPIC_NAME)) + if (object.containsKey(SESSION_NAME)) + { + JsonArray sessionsJson = object.getJsonArray(SESSION_NAME); + sessionsJson.forEach(s -> + { + String clientId = s.asJsonObject().getString(CLIENT_ID_NAME, CLIENT_ID_DEFAULT); + MqttSessionConfig session = new MqttSessionConfig(clientId); + mqttConfig.session(session); + }); + } + + if (object.containsKey(SUBSCRIBE_NAME)) { - mqttConfig.topic(object.getString(TOPIC_NAME)); + JsonArray subscribesJson = object.getJsonArray(SUBSCRIBE_NAME); + subscribesJson.forEach(s -> + { + String topic = s.asJsonObject().getString(TOPIC_NAME); + MqttSubscribeConfig subscribe = new MqttSubscribeConfig(topic); + mqttConfig.subscribe(subscribe); + }); } - if (object.containsKey(CAPABILITIES_NAME)) + if (object.containsKey(PUBLISH_NAME)) { - mqttConfig.capabilities(MqttCapabilities.valueOf(object.getString(CAPABILITIES_NAME).toUpperCase())); + JsonArray publishesJson = object.getJsonArray(PUBLISH_NAME); + publishesJson.forEach(p -> + { + String topic = p.asJsonObject().getString(TOPIC_NAME); + MqttPublishConfig publish = new MqttPublishConfig(topic); + mqttConfig.publish(publish); + }); } return mqttConfig.build(); diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java index c1de099fea..1c18272037 100644 --- 
a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java @@ -15,57 +15,120 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.internal.config; +import java.util.ArrayList; +import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfig; -import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; public final class MqttConditionMatcher { - private final Matcher topicMatch; - private final MqttCapabilities capabilitiesMatch; + private final List sessionMatchers; + private final List subscribeMatchers; + private final List publishMatchers; public MqttConditionMatcher( MqttConditionConfig condition) { - this.topicMatch = condition.topic != null ? asMatcher(condition.topic) : null; - this.capabilitiesMatch = condition.capabilities; + this.sessionMatchers = + condition.sessions != null ? + asWildcardMatcher(condition.sessions.stream().map(s -> s.clientId).collect(Collectors.toList())) : null; + this.subscribeMatchers = + condition.subscribes != null ? + asTopicMatcher(condition.subscribes.stream().map(s -> s.topic).collect(Collectors.toList())) : null; + this.publishMatchers = + condition.publishes != null ? 
+ asTopicMatcher(condition.publishes.stream().map(s -> s.topic).collect(Collectors.toList())) : null; } - public boolean matches( - MqttCapabilities capabilities) + public boolean matchesSession( + String clientId) { - return matchesCapabilities(capabilities); + boolean match = false; + if (sessionMatchers != null) + { + for (Matcher matcher : sessionMatchers) + { + match = matcher.reset(clientId).matches(); + if (match) + { + break; + } + } + } + return match; } - public boolean matches( - String topic, - MqttCapabilities capabilities) + public boolean matchesSubscribe( + String topic) { - return matchesTopic(topic) && - matchesCapabilities(capabilities); + boolean match = false; + if (subscribeMatchers != null) + { + for (Matcher matcher : subscribeMatchers) + { + match = matcher.reset(topic).matches(); + if (match) + { + break; + } + } + } + return match; } - private boolean matchesTopic( + public boolean matchesPublish( String topic) { - return this.topicMatch == null || this.topicMatch.reset(topic).matches(); + boolean match = false; + if (publishMatchers != null) + { + for (Matcher matcher : publishMatchers) + { + match = matcher.reset(topic).matches(); + if (match) + { + break; + } + } + } + return match; } - private boolean matchesCapabilities( - MqttCapabilities capabilities) + private static List asWildcardMatcher( + List wildcards) { - return this.capabilitiesMatch == null || (this.capabilitiesMatch.value() & capabilities.value()) != 0; + List matchers = new ArrayList<>(); + for (String wildcard : wildcards) + { + String pattern = wildcard.replace(".", "\\.").replace("*", ".*"); + + if (!pattern.endsWith(".*")) + { + pattern = pattern + "(\\?.*)?"; + } + matchers.add(Pattern.compile(pattern).matcher("")); + + } + + return matchers; } - private static Matcher asMatcher( - String wildcard) + private static List asTopicMatcher( + List wildcards) { - return Pattern.compile(wildcard - .replace(".", "\\.") - .replace("$", "\\$") - .replace("+", "[^/]*") - 
.replace("#", ".*")).matcher(""); + List matchers = new ArrayList<>(); + for (String wildcard : wildcards) + { + matchers.add(Pattern.compile(wildcard + .replace(".", "\\.") + .replace("$", "\\$") + .replace("+", "[^/]*") + .replace("#", ".*")).matcher("")); + + } + return matchers; } } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java index be2e40fda1..859407ec51 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java @@ -21,7 +21,6 @@ import java.util.function.LongPredicate; import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfig; -import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; import io.aklivity.zilla.runtime.engine.config.OptionsConfig; import io.aklivity.zilla.runtime.engine.config.RouteConfig; @@ -49,16 +48,21 @@ boolean authorized( return authorized.test(authorization); } - boolean matches( - MqttCapabilities capabilities) + boolean matchesSession( + String clientId) { - return when.isEmpty() || when.stream().anyMatch(m -> m.matches(capabilities)); + return when.isEmpty() || when.stream().anyMatch(m -> m.matchesSession(clientId)); } - boolean matches( - String topic, - MqttCapabilities capabilities) + boolean matchesSubscribe( + String topic) { - return when.isEmpty() || when.stream().anyMatch(m -> m.matches(topic, capabilities)); + return when.isEmpty() || when.stream().anyMatch(m -> m.matchesSubscribe(topic)); + } + + boolean matchesPublish( + String topic) + { + return when.isEmpty() || when.stream().anyMatch(m -> m.matchesPublish(topic)); } } diff --git 
a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index ab7f4a0df9..d0126aed5b 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -111,7 +111,6 @@ import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Array32FW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttBinaryFW; -import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttPayloadFormat; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttQoS; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttSessionStateFW; @@ -1672,10 +1671,6 @@ else if (length > MAXIMUM_CLIENT_ID_LENGTH) break decode; } - final MqttBindingConfig binding = bindings.get(routedId); - - final MqttRouteConfig resolved = binding != null ? 
binding.resolve(authorization) : null; - keepAlive = (short) Math.min(Math.max(connect.keepAlive(), keepAliveMinimum), keepAliveMaximum); serverDefinedKeepAlive = keepAlive != connect.keepAlive(); keepAliveTimeout = Math.round(TimeUnit.SECONDS.toMillis(keepAlive) * 1.5); @@ -1684,7 +1679,7 @@ else if (length > MAXIMUM_CLIENT_ID_LENGTH) if (session) { - resolveSession(traceId, authorization, resolved.id, connectFlags); + resolveSession(traceId, authorization, connectFlags); } doCancelConnectTimeout(); @@ -1829,9 +1824,12 @@ else if (this.authField.equals(MqttConnectProperty.PASSWORD)) private void resolveSession( long traceId, long authorization, - long resolvedId, int flags) { + final MqttBindingConfig binding = bindings.get(routedId); + + final MqttRouteConfig resolved = binding != null ? binding.resolveSession(authorization, clientId.asString()) : null; + final MqttBeginExFW.Builder builder = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) .typeId(mqttTypeId) .session(s -> s @@ -1842,7 +1840,7 @@ private void resolveSession( if (sessionStream == null) { - sessionStream = new MqttSessionStream(originId, resolvedId, 0); + sessionStream = new MqttSessionStream(originId, resolved.id, 0); } sessionStream.doSessionBegin(traceId, affinity, builder.build()); @@ -1857,7 +1855,7 @@ private MqttPublishStream resolvePublishStream( final MqttBindingConfig binding = bindings.get(routedId); final MqttRouteConfig resolved = binding != null ? 
- binding.resolve(sessionId, topic, MqttCapabilities.PUBLISH_ONLY) : null; + binding.resolvePublish(sessionId, topic) : null; if (resolved != null) { @@ -1923,7 +1921,6 @@ private void onDecodeSubscribe( int subscriptionId = 0; boolean containsSubscriptionId = false; - int unrouteableMask = 0; MqttPropertiesFW properties = subscribe.properties(); final OctetsFW propertiesValue = properties.value(); @@ -2071,7 +2068,7 @@ private void openSubscribeStreams( { final MqttBindingConfig binding = bindings.get(routedId); final MqttRouteConfig resolved = - binding != null ? binding.resolve(sessionId, subscription.filter, MqttCapabilities.SUBSCRIBE_ONLY) : null; + binding != null ? binding.resolveSubscribe(sessionId, subscription.filter) : null; if (resolved != null) { @@ -2205,7 +2202,7 @@ private void sendUnsuback( { final MqttBindingConfig binding = bindings.get(routedId); final MqttRouteConfig resolved = - binding != null ? binding.resolve(sessionId, topicFilter, MqttCapabilities.SUBSCRIBE_ONLY) : null; + binding != null ? 
binding.resolveSubscribe(sessionId, topicFilter) : null; final int subscribeKey = subscribeKey(clientId.asString(), resolved.id); final MqttSubscribeStream stream = subscribeStreams.get(subscribeKey); diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java index f56647f677..730334c0ee 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java @@ -15,8 +15,6 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.internal.config; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities.PUBLISH_ONLY; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttCapabilities.SUBSCRIBE_ONLY; import static java.util.function.Function.identity; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; @@ -31,6 +29,9 @@ import org.junit.Test; import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfig; +import io.aklivity.zilla.runtime.binding.mqtt.config.MqttPublishConfig; +import io.aklivity.zilla.runtime.binding.mqtt.config.MqttSessionConfig; +import io.aklivity.zilla.runtime.binding.mqtt.config.MqttSubscribeConfig; public class MqttConditionConfigAdapterTest { @@ -49,15 +50,43 @@ public void shouldReadCondition() { String text = "{" + - "\"topic\": \"test\"," + - "\"capabilities\": \"publish_only\"" + + "\"session\":" + + "[" + + "{" + + "\"client-id\": \"*\"" + + "}" + + "]," + + "\"subscribe\":" + + "[" + + "{" + + "\"topic\": \"reply/one\"" + + "}," + + "{" + + "\"topic\": \"reply/two\"" + + "}," + + "]," + + "\"publish\":" + + "[" + + "{" + + 
"\"topic\": \"command/one\"" + + "}," + + "{" + + "\"topic\": \"command/two\"" + + "}" + + "]" + "}"; MqttConditionConfig condition = jsonb.fromJson(text, MqttConditionConfig.class); assertThat(condition, not(nullValue())); - assertThat(condition.topic, equalTo("test")); - assertThat(condition.capabilities, equalTo(PUBLISH_ONLY)); + assertThat(condition.sessions, not(nullValue())); + assertThat(condition.sessions.get(0).clientId, equalTo("*")); + assertThat(condition.subscribes, not(nullValue())); + assertThat(condition.subscribes.get(0).topic, equalTo("reply/one")); + assertThat(condition.subscribes.get(1).topic, equalTo("reply/two")); + assertThat(condition.publishes, not(nullValue())); + assertThat(condition.publishes.get(0).topic, equalTo("command/one")); + assertThat(condition.publishes.get(1).topic, equalTo("command/two")); } @Test @@ -65,13 +94,17 @@ public void shouldWriteCondition() { MqttConditionConfig condition = MqttConditionConfig.builder() .inject(identity()) - .topic("test") - .capabilities(SUBSCRIBE_ONLY) + .session(new MqttSessionConfig("*")) + .subscribe(new MqttSubscribeConfig("reply/one")) + .subscribe(new MqttSubscribeConfig("reply/two")) + .publish(new MqttPublishConfig("command/one")) + .publish(new MqttPublishConfig("command/two")) .build(); String text = jsonb.toJson(condition); assertThat(text, not(nullValue())); - assertThat(text, equalTo("{\"topic\":\"test\",\"capabilities\":\"subscribe_only\"}")); + assertThat(text, equalTo("{\"session\":[{\"client-id\":\"*\"}],\"subscribe\":[{\"topic\":\"reply/one\"}," + + "{\"topic\":\"reply/two\"}],\"publish\":[{\"topic\":\"command/one\"},{\"topic\":\"command/two\"}]}")); } } diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java index afa0169aa8..c941c7dc33 100644 --- 
a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java @@ -53,6 +53,7 @@ public class SessionIT .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") .external("app0") + .external("app1") .clean(); @Rule @@ -277,4 +278,16 @@ public void shouldRedirectBeforeConnack() throws Exception { k3po.finish(); } + + @Test + @Configuration("server.route.non.default.yaml") + @Specification({ + "${net}/session.subscribe.publish.routing/client", + "${app}/session.subscribe.publish.routing/server"}) + @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + @Configure(name = MAXIMUM_QOS_NAME, value = "2") + public void shouldSubscribeAndPublishToNonDefaultRoute() throws Exception + { + k3po.finish(); + } } From d395bb9dbf43019d5b213f92239b879ca0277365 Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Mon, 18 Sep 2023 21:03:13 -0700 Subject: [PATCH 092/115] Fix known issues in group client (#428) --- .../stream/KafkaClientGroupFactory.java | 196 +++++++++++------- .../kafka/internal/stream/ClientGroupIT.java | 10 + .../client.rpt | 72 +++++++ .../server.rpt | 73 +++++++ .../client.rpt | 187 +++++++++++++++++ .../server.rpt | 171 +++++++++++++++ .../client.rpt | 84 -------- .../server.rpt | 66 ------ .../kafka/streams/application/GroupIT.java | 9 + .../kafka/streams/network/GroupIT.java | 9 + 10 files changed, 656 insertions(+), 221 deletions(-) create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt create mode 100644 
specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt create mode 100644 specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index a769bf0733..00d82d4033 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -338,15 +338,11 @@ public MessageConsumer newStream( final GroupMembership groupMembership = instanceIds.get(binding.id); assert groupMembership != null; - KafkaGroupStream stream = groupStreams.get(groupId); - if (stream == null || HIGHLANDER_PROTOCOL.equals(protocol)) - { - if (stream != null) - { - stream.streamCleanup(traceId, traceId); - } + KafkaGroupStream group = groupStreams.get(groupId); - KafkaGroupStream group = new KafkaGroupStream( + if (group == null) + { + KafkaGroupStream newGroup = new KafkaGroupStream( application, originId, routedId, @@ -358,9 +354,14 @@ public MessageConsumer newStream( timeout, groupMembership, sasl); - newStream = group::onApplication; + newStream = newGroup::onApplication; - groupStreams.put(groupId, group); + groupStreams.put(groupId, newGroup); + } + else if (HIGHLANDER_PROTOCOL.equals(protocol)) + { + group.onApplicationMigrate(begin, application); + newStream = group::onApplication; } } } @@ -1244,28 +1245,29 @@ private int decodeLeaveGroupResponse( private final class KafkaGroupStream { - private 
final MessageConsumer application; private final ClusterClient clusterClient; private final DescribeClient describeClient; private final CoordinatorClient coordinatorClient; private final GroupMembership groupMembership; private final String groupId; private final String protocol; - private int timeout; - private final long originId; - private final long routedId; - private final long initialId; - private final long replyId; - private final long affinity; private final long resolvedId; - private final KafkaSaslConfig sasl; - public String host; - public int port; + + private MessageConsumer application; + private String host; private String nodeId; + private int port; + private int timeout; private MutableDirectBuffer metadataBuffer; private int state; + private long originId; + private long routedId; + private long initialId; + private long replyId; + private long affinity; + private long initialSeq; private long initialAck; private int initialMax; @@ -1302,7 +1304,6 @@ private final class KafkaGroupStream this.timeout = timeout; this.resolvedId = resolvedId; this.groupMembership = groupMembership; - this.sasl = sasl; this.clusterClient = new ClusterClient(routedId, resolvedId, sasl, this); this.describeClient = new DescribeClient(routedId, resolvedId, sasl, this); this.coordinatorClient = new CoordinatorClient(routedId, resolvedId, sasl, this); @@ -1374,7 +1375,11 @@ private void onApplicationBegin( state = KafkaState.openingInitial(state); - clusterClient.doNetworkBeginIfNecessary(traceId, authorization, affinity); + if (coordinatorClient.nextJoinGroupRequestId == 0) + { + clusterClient.doNetworkBeginIfNecessary(traceId, authorization, affinity); + } + doApplicationWindow(traceId, 0L, 0, 0, 0); } @@ -1623,7 +1628,7 @@ private void doApplicationReset( private void doApplicationAbortIfNecessary( long traceId) { - if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) + if (!KafkaState.replyClosed(state)) { doApplicationAbort(traceId); } @@ -1633,7 
+1638,7 @@ private void doApplicationResetIfNecessary( long traceId, Flyweight extension) { - if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) + if (!KafkaState.initialClosed(state)) { doApplicationReset(traceId, extension); } @@ -1669,13 +1674,36 @@ private void cleanupApplication( groupStreams.remove(groupId); } - private void streamCleanup( - long traceId, - long authorizationId) + private void onApplicationMigrate( + BeginFW begin, + MessageConsumer application) { - cleanupApplication(traceId, EMPTY_OCTETS); - clusterClient.cleanupNetwork(traceId, authorizationId); - coordinatorClient.cleanupNetwork(traceId, authorizationId); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long affinity = begin.affinity(); + final long traceId = begin.traceId(); + + doApplicationResetIfNecessary(traceId, EMPTY_OCTETS); + doApplicationAbortIfNecessary(traceId); + + this.application = application; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.affinity = affinity; + + if (KafkaState.closed(state)) + { + initialSeq = 0; + initialAck = 0; + replyAck = 0; + replySeq = 0; + state = 0; + } + + coordinatorClient.doJoinGroupRequest(traceId); } } @@ -3073,9 +3101,10 @@ private final class CoordinatorClient extends KafkaSaslClient private int nextResponseId; private long heartbeatRequestId = NO_CANCEL_ID; - private String leader; private int generationId; + private int nextJoinGroupRequestId; + private int nextJoinGroupResponseId; private KafkaGroupCoordinatorClientDecoder decoder; private LongLongConsumer encoder; private OctetsFW assignment = EMPTY_OCTETS; @@ -3292,6 +3321,10 @@ private void doNetworkBeginIfNecessary( replyAck = 0; replySeq = 0; state = 0; + nextJoinGroupRequestId = 0; + nextJoinGroupResponseId = 0; + nextRequestId = 0; + nextResponseId = 0; } if 
(!KafkaState.initialOpening(state)) @@ -3308,6 +3341,8 @@ private void doNetworkBegin( this.initialId = supplyInitialId.applyAsLong(routedId); this.replyId = supplyReplyId.applyAsLong(initialId); + nextJoinGroupRequestId++; + state = KafkaState.openingInitial(state); Consumer extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) @@ -3351,7 +3386,7 @@ private void doNetworkEnd( long traceId, long authorization) { - if (!KafkaState.initialClosed(state)) + if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state)) { state = KafkaState.closedInitial(state); @@ -3380,7 +3415,7 @@ private void doNetworkAbort( private void doNetworkReset( long traceId) { - if (!KafkaState.replyClosed(state)) + if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state)) { doReset(network, originId, routedId, replyId, replySeq, replyAck, replyMax, traceId, authorization, EMPTY_OCTETS); @@ -3798,7 +3833,9 @@ private void doSyncRequest( OctetsFW assignment) { this.assignment = assignment; - doEncodeSyncGroupRequest(traceId, budgetId); + + encoder = encodeSyncGroupRequest; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); } private void doJoinGroupRequest( @@ -3810,7 +3847,14 @@ private void doJoinGroupRequest( heartbeatRequestId = NO_CANCEL_ID; } - doEncodeJoinGroupRequest(traceId, 0); + if (nextJoinGroupRequestId != 0 && + nextJoinGroupRequestId == nextJoinGroupResponseId) + { + encoder = encodeJoinGroupRequest; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } + + nextJoinGroupRequestId++; } private void doHeartbeat( @@ -4057,50 +4101,57 @@ private void onJoinGroupResponse( String memberId) { nextResponseId++; - - this.leader = leaderId; + nextJoinGroupResponseId++; delegate.groupMembership.memberIds.put(delegate.groupId, memberId); - delegate.doApplicationFlush(traceId, authorization, - ex -> ex.set((b, o, l) -> kafkaFlushExRW.wrap(b, o, l) - .typeId(kafkaTypeId) - .group(g -> g.leaderId(leaderId) 
- .memberId(memberId) - .members(gm -> members.forEach(m -> - { - OctetsFW metadata = m.metadata; - DirectBuffer buffer = metadata.value(); - final int limit = metadata.sizeof(); - - int progress = 0; - - ConsumerSubscriptionMetadataFW newGroupMetadata = subscriptionMetadataRO - .wrap(buffer, 0, metadata.sizeof()); - progress = newGroupMetadata.limit(); - - for (int i = 0; i < newGroupMetadata.metadataTopicCount(); i++) + if (nextJoinGroupRequestId == nextJoinGroupResponseId) + { + delegate.doApplicationFlush(traceId, authorization, + ex -> ex.set((b, o, l) -> kafkaFlushExRW.wrap(b, o, l) + .typeId(kafkaTypeId) + .group(g -> g.leaderId(leaderId) + .memberId(memberId) + .members(gm -> members.forEach(m -> { - ConsumerMetadataTopicFW topic = metadataTopicRO.wrap(buffer, progress, limit); - progress = topic.limit(); - } + OctetsFW metadata = m.metadata; + DirectBuffer buffer = metadata.value(); + final int limit = metadata.sizeof(); - ConsumerSubscriptionUserdataFW userdata = subscriptionUserdataRO.wrap(buffer, progress, limit); + int progress = 0; - gm.item(i -> - { - KafkaGroupMemberFW.Builder builder = i.id(m.memberId); - OctetsFW newUserdata = userdata.userdata(); - if (newUserdata.sizeof() > 0) + ConsumerSubscriptionMetadataFW newGroupMetadata = subscriptionMetadataRO + .wrap(buffer, 0, metadata.sizeof()); + progress = newGroupMetadata.limit(); + + for (int i = 0; i < newGroupMetadata.metadataTopicCount(); i++) { - builder.metadataLen(newUserdata.sizeof()).metadata(newUserdata); + ConsumerMetadataTopicFW topic = metadataTopicRO.wrap(buffer, progress, limit); + progress = topic.limit(); } - }); - }))) - .build() - .sizeof())); - encoder = encodeSyncGroupRequest; + ConsumerSubscriptionUserdataFW userdata = subscriptionUserdataRO.wrap(buffer, progress, limit); + + gm.item(i -> + { + KafkaGroupMemberFW.Builder builder = i.id(m.memberId); + OctetsFW newUserdata = userdata.userdata(); + if (newUserdata.sizeof() > 0) + { + 
builder.metadataLen(newUserdata.sizeof()).metadata(newUserdata); + } + }); + }))) + .build() + .sizeof())); + + encoder = encodeSyncGroupRequest; + } + else + { + encoder = encodeJoinGroupRequest; + signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); + } } private void onSynGroupRebalance( @@ -4109,6 +4160,8 @@ private void onSynGroupRebalance( { nextResponseId++; + nextJoinGroupRequestId++; + encoder = encodeJoinGroupRequest; signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); } @@ -4200,7 +4253,6 @@ private void onHeartbeatResponse( } encoder = encodeHeartbeatRequest; - heartbeatRequestId = signaler.signalAt(currentTimeMillis() + delegate.timeout / 2, originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); } @@ -4224,6 +4276,8 @@ private void onRebalanceError( { nextResponseId++; + nextJoinGroupRequestId++; + encoder = encodeJoinGroupRequest; signaler.signalNow(originId, routedId, initialId, SIGNAL_NEXT_REQUEST, 0); } diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java index f4bdf04b0e..f0a5b8e7ee 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java @@ -110,6 +110,16 @@ public void shouldRebalanceProtocolHighlanderMigrateLeader() throws Exception k3po.finish(); } + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/rebalance.protocol.highlander.migrate.leader.in.parallel/client", + "${net}/rebalance.protocol.highlander.migrate.leader.in.parallel/server"}) + public void shouldRebalanceProtocolHighlanderMigrateLeaderInParallel() throws Exception + { + k3po.finish(); + } + @Test @Configuration("client.yaml") @Specification({ diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt new file mode 100644 index 0000000000..3349b8fd90 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt @@ -0,0 +1,72 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +read notify ROUTED_FIRST_GROUP + +write aborted + +connect await ROUTED_FIRST_GROUP "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "half-duplex" + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} + +read advised zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1") + .build() + .build()} + +write zilla:data.empty +write flush + +read zilla:data.empty diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt new file mode 100644 index 0000000000..e6b22e881a --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt @@ -0,0 +1,73 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property serverAddress "zilla://streams/app0" + +accept ${serverAddress} + option zilla:window 8192 + option zilla:transmission "half-duplex" + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +read abort + +accepted + +read zilla:begin.ext ${kafka:matchBeginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(45000) + .build() + .build()} + +connected + +write zilla:begin.ext ${kafka:beginEx() + .typeId(zilla:id("kafka")) + .group() + .groupId("test") + .protocol("highlander") + .timeout(30000) + .build() + .build()} +write flush + +write advise zilla:flush ${kafka:flushEx() + .typeId(zilla:id("kafka")) + .group() + .leaderId("memberId-1") + .memberId("memberId-1") + .members("memberId-1") + .build() + .build()} + +read zilla:data.empty + +write zilla:data.empty +write flush diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt new file mode 100644 index 0000000000..857f32f290 --- /dev/null +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt @@ -0,0 +1,187 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property networkConnectWindow 8192 +property instanceId ${kafka:randomBytes(42)} + +property newRequestId ${kafka:newRequestId()} +property fetchWaitMax 500 +property fetchBytesMax 65535 +property partitionBytesMax 8192 + +connect "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 22 # size + 10s # find coordinator + 1s # v1 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # "session" coordinator key + [0x00] # coordinator group type + +read 35 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 4s "none" # error message none + 0 # coordinator node + 9s "localhost" # host + 9092 # port + +write close +read abort + +read notify ROUTED_CLUSTER_SERVER_FIRST + +connect await ROUTED_CLUSTER_SERVER_FIRST + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 82 # size + 32s # describe configs + 0s # v0 + ${newRequestId} + -1s # no client id + 1 # resources + [0x04] # broker resource + 1s "0" # "node" topic + 2 # 
configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +read 103 # size + (int:newRequestId) + 0 + 1 # resources + 0s # no error + -1s # error message + [0x04] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +read notify ROUTED_DESCRIBE_SERVER_FIRST + +connect await ROUTED_DESCRIBE_SERVER_FIRST + "zilla://streams/net0" + option zilla:window ${networkConnectWindow} + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write 119 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 30000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 14 # metadata size + ${kafka:randomBytes(14)} # metadata + +read 126 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions + +write 129 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 30000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 14 # metadata size + ${kafka:randomBytes(14)} # metadata + +read 126 # size + (int:newRequestId) 
+ 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions + +write 101 # size + 14s # sync group + 3s # v3 + ${newRequestId} + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +read 14 # size + (int:newRequestId) + 0 # throttle time + 0s # no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt new file mode 100644 index 0000000000..2201fa8c53 --- /dev/null +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt @@ -0,0 +1,171 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +property instanceId ${kafka:randomBytes(42)} + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted + +connected + +read 22 # size + 10s # find coordinator + 1s # v1 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # "test" coordinator key + [0x00] # coordinator group type + +write 35 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 4s "none" # error message none + 0 # coordinator node + 9s "localhost" # host + 9092 # port + +read closed +write aborted + +accepted + +connected + +read 82 # size + 32s # describe configs + 0s # v0 + (int:newRequestId) + -1s # no client id + 1 # resources + [0x04] # broker resource + 1s "0" # "node" topic + 2 # configs + 28s "group.min.session.timeout.ms" # name + 28s "group.max.session.timeout.ms" # name + +write 103 # size + ${newRequestId} + 0 + 1 # resources + 0s # no error + -1s # error message + [0x04] # broker resource + 1s "0" # "0" nodeId + 2 # configs + 28s "group.min.session.timeout.ms" # name + 4s "6000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + 28s "group.max.session.timeout.ms" # name + 5s "30000" # value + [0x00] # not read only + [0x00] # not default + [0x00] # not sensitive + +accepted + +connected + +read 119 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 30000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 14 # metadata size + [0..14] # metadata + +write 126 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group 
id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions + +read 129 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 30000 # session timeout + 4000 # rebalance timeout + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 14 # metadata size + [0..14] # metadata + +write 126 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 3 # generated id + 10s "highlander" # protocol name + 10s "memberId-1" # leader id + 10s "memberId-1" # consumer member group id + 1 # members + 10s "memberId-1" # consumer member group id + 42s ${instanceId} # group instance id + 14 # metadata size + 2s # version + 0 # topics + 0 # userdata + 0 # partitions + +read 101 # size + 14s # sync group + 3s # v3 + (int:newRequestId) + 5s "zilla" # no client id + 4s "test" # consumer group + 3 # generation id + 10s "memberId-1" # consumer member group id + 42s [0..42] # group instance id + 1 # assignments + 10s "memberId-1" # consumer member group id + 0 # metadata + +write 14 # size + ${newRequestId} + 0 # throttle time + 0s # no error + 0 # assignment diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt index 3d957289ca..59668c7c17 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -153,90 +153,6 @@ read 14 # size 0s # no error 0 # assignment -write close -read abort - -read notify ROUTED_BROKER_SERVER_SECOND - -connect await ROUTED_BROKER_SERVER_SECOND "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - -write 22 # size - 10s # find coordinator - 1s # v1 - ${newRequestId} - 5s "zilla" # client id - 4s "test" # "session" coordinator key - [0x00] # coordinator group type - -read 35 # size - (int:newRequestId) - 0 # throttle time - 0s # no error - 4s "none" # error message none - 0 # coordinator node - 9s "localhost" # host - 9092 # port - -write close -read abort - -read notify ROUTED_CLUSTER_SERVER_SECOND - -connect await ROUTED_CLUSTER_SERVER_SECOND - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - -write 82 # size - 32s # describe configs - 0s # v0 - ${newRequestId} - -1s # no client id - 1 # resources - [0x04] # broker resource - 1s "0" # "node" topic - 2 # configs - 28s "group.min.session.timeout.ms" # name - 28s "group.max.session.timeout.ms" # name - -read 103 # size - (int:newRequestId) - 0 - 1 # resources - 0s # no error - -1s # error message - [0x04] # broker resource - 1s "0" # "0" nodeId - 2 # configs - 28s "group.min.session.timeout.ms" # name - 4s "6000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - 28s "group.max.session.timeout.ms" # name - 5s "30000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - -read notify ROUTED_DESCRIBE_SERVER_SECOND - -connect await ROUTED_DESCRIBE_SERVER_SECOND - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option 
zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 129 # size 11s # join group 5s # v5 diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt index cbc7d59c98..239ecb7721 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -137,72 +137,6 @@ write 14 # size 0s # no error 0 # assignment -read closed -write aborted - -accepted - -connected - -read 22 # size - 10s # find coordinator - 1s # v1 - (int:newRequestId) - 5s "zilla" # client id - 4s "test" # "test" coordinator key - [0x00] # coordinator group type - -write 35 # size - ${newRequestId} - 0 # throttle time - 0s # no error - 4s "none" # error message none - 0 # coordinator node - 9s "localhost" # host - 9092 # port - -read closed -write aborted - -accepted - -connected - -read 82 # size - 32s # describe configs - 0s # v0 - (int:requestId) - -1s # no client id - 1 # resources - [0x04] # broker resource - 1s "0" # "node" topic - 2 # configs - 28s "group.min.session.timeout.ms" # name - 28s "group.max.session.timeout.ms" # name - -write 103 # size - ${requestId} - 0 - 1 # resources - 0s # no error - -1s # error message - [0x04] # broker resource - 1s "0" # "0" nodeId - 2 # configs - 28s "group.min.session.timeout.ms" # name - 4s "6000" # value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - 28s "group.max.session.timeout.ms" # name - 5s "30000" # 
value - [0x00] # not read only - [0x00] # not default - [0x00] # not sensitive - -accepted - -connected read 129 # size 11s # join group diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java index 6955a66e91..c6551a05f8 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/application/GroupIT.java @@ -72,6 +72,15 @@ public void shouldRebalanceProtocolHighlanderMigrateLeader() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/rebalance.protocol.highlander.migrate.leader.in.parallel/client", + "${app}/rebalance.protocol.highlander.migrate.leader.in.parallel/server"}) + public void shouldRebalanceProtocolHighlanderMigrateLeaderInParallel() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${app}/rebalance.protocol.unknown/client", diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupIT.java index a9f0dbe8b5..61321b9d6d 100644 --- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupIT.java +++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/GroupIT.java @@ -90,6 +90,15 @@ public void shouldRebalanceProtocolHighlanderMigrateLeader() throws Exception k3po.finish(); } + @Test + @Specification({ + "${net}/rebalance.protocol.highlander.migrate.leader.in.parallel/client", + "${net}/rebalance.protocol.highlander.migrate.leader.in.parallel/server"}) + public void shouldRebalanceProtocolHighlanderMigrateLeaderInParallel() throws Exception + { + 
k3po.finish(); + } + @Test @Specification({ "${net}/rebalance.protocol.unknown/client", From d397880d83bf0458f0b1ae05b8fdf1c5b2735fdc Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Tue, 19 Sep 2023 13:17:21 -0700 Subject: [PATCH 093/115] Fix flow control issue (#430) --- runtime/binding-kafka-grpc/pom.xml | 2 +- .../stream/KafkaGrpcRemoteServerFactory.java | 41 ++++++++++++------- .../src/main/zilla/internal.idl | 1 + 3 files changed, 28 insertions(+), 16 deletions(-) diff --git a/runtime/binding-kafka-grpc/pom.xml b/runtime/binding-kafka-grpc/pom.xml index e1aadf6c7c..41b94c3bcb 100644 --- a/runtime/binding-kafka-grpc/pom.xml +++ b/runtime/binding-kafka-grpc/pom.xml @@ -26,7 +26,7 @@ 11 11 - 0.86 + 0.85 0 diff --git a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcRemoteServerFactory.java b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcRemoteServerFactory.java index 1d1cd28f75..baa6d57abb 100644 --- a/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcRemoteServerFactory.java +++ b/runtime/binding-kafka-grpc/src/main/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcRemoteServerFactory.java @@ -435,7 +435,8 @@ private void onKafkaData( helper.replyTo, newCorrelationId); } - flushGrpcClientData(grpcClient, traceId, authorization, helper.service, helper.method, flags, payload); + flushGrpcClientData(grpcClient, traceId, authorization, helper.service, helper.method, flags, + reserved, payload); } else if (helper.correlationId != null) { @@ -447,7 +448,8 @@ else if (helper.correlationId != null) GrpcClient grpcClient = grpcClients.get(lastCorrelationId); if (grpcClient != null) { - flushGrpcClientData(grpcClient, traceId, authorization, null, null, flags, payload); + flushGrpcClientData(grpcClient, traceId, authorization, null, null, flags, + reserved, 
payload); } } @@ -490,7 +492,8 @@ private void flushGrpcMessagesIfBuffered( final long messageTraceId = queueMessage.traceId(); final long messageAuthorization = queueMessage.authorization(); final int flags = queueMessage.flags(); - final int messageSize = queueMessage.valueLength(); + final int reserved = queueMessage.reserved(); + final int valueLength = queueMessage.valueLength(); final OctetsFW payload = queueMessage.value(); final int queuedMessageSize = queueMessage.sizeof(); @@ -506,8 +509,9 @@ private void flushGrpcMessagesIfBuffered( final int progress = grpcClient.onKafkaData(messageTraceId, messageAuthorization, flags, payload); - if (progress == messageSize) + if (progress == valueLength) { + replyReserved -= reserved; final int remaining = grpcQueueSlotOffset - progressOffset; grpcQueueBuffer.putBytes(oldProgressOffset, grpcQueueBuffer, progressOffset, remaining); @@ -517,7 +521,7 @@ private void flushGrpcMessagesIfBuffered( else if (progress > 0) { final int remainingPayload = queuedMessageSize - progress; - queueGrpcMessage(traceId, authorization, lastCorrelationId, service, method, flags, + queueGrpcMessage(traceId, authorization, lastCorrelationId, service, method, flags, reserved, payload, remainingPayload); final int remainingMessageOffset = grpcQueueSlotOffset - progressOffset; grpcQueueBuffer.putBytes(oldProgressOffset, grpcQueueBuffer, progressOffset, remainingMessageOffset); @@ -543,17 +547,26 @@ private void flushGrpcClientData( OctetsFW service, OctetsFW method, int flags, + int reserved, OctetsFW payload) { final int progress = grpcClient.onKafkaData(traceId, authorization, flags, payload); int length = payload != null ? 
payload.sizeof() : 0; final int remaining = length - progress; - if (remaining > 0 || payload == null) + + if (remaining == 0 && payload != null || + payload == null && KafkaGrpcState.initialClosing(grpcClient.state)) + { + replyReserved -= reserved; + } + + if (remaining > 0 && payload != null || + payload == null && !KafkaGrpcState.initialClosing(grpcClient.state)) { flags = progress == 0 ? flags : DATA_FLAG_CON; payload = payload == null ? emptyRO : payload; queueGrpcMessage(traceId, authorization, grpcClient.correlationId, service, method, - flags, payload, remaining); + flags, reserved, payload, remaining); } } @@ -564,6 +577,7 @@ private void queueGrpcMessage( OctetsFW service, OctetsFW method, int flags, + int reserved, OctetsFW payload, int length) { @@ -577,6 +591,7 @@ private void queueGrpcMessage( .traceId(traceId) .authorization(authorization) .flags(flags) + .reserved(reserved) .value(payload.buffer(), payload.offset(), length) .build(); @@ -685,14 +700,11 @@ private void doKafkaWindow( { long replyAckMax = Math.max(replySeq - replyReserved, replyAck); - if (replyWindow() - replyAckMax > 0) - { - replyAck = replyAckMax; - assert replyAck <= replySeq; + replyAck = replyAckMax; + assert replyAck <= replySeq; - doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, - traceId, authorization, replyBud, replyPad, replyCap); - } + doWindow(kafka, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, replyBud, replyPad, replyCap); } } @@ -1601,7 +1613,6 @@ private void doGrpcData( traceId, authorization, budgetId, reserved, flags, buffer, offset, length, emptyRO); initialSeq += reserved; - server.replyReserved -= length; assert initialSeq <= initialAck + initialMax; } diff --git a/runtime/binding-kafka-grpc/src/main/zilla/internal.idl b/runtime/binding-kafka-grpc/src/main/zilla/internal.idl index 49daf490d6..7ed5eb03e5 100644 --- a/runtime/binding-kafka-grpc/src/main/zilla/internal.idl +++ 
b/runtime/binding-kafka-grpc/src/main/zilla/internal.idl @@ -27,6 +27,7 @@ scope internal int64 traceId; int64 authorization; uint8 flags = 3; // 0x01 FIN, 0x02 INIT, 0x04 INCOMPLETE, 0x08 SKIP + int32 reserved; int32 valueLength; octets[valueLength] value = null; } From 182711df4ba7f1329b9a9d23378827001c87ee1d Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Tue, 19 Sep 2023 16:14:22 -0700 Subject: [PATCH 094/115] Set init flag for data fragmentation in grpc (#431) --- .../binding/grpc/internal/stream/GrpcClientFactory.java | 7 +++---- .../binding/grpc/internal/stream/GrpcServerFactory.java | 6 +++--- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcClientFactory.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcClientFactory.java index 5ba88f9ec9..419b50743d 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcClientFactory.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcClientFactory.java @@ -57,6 +57,7 @@ public class GrpcClientFactory implements GrpcStreamFactory { private static final int GRPC_MESSAGE_PADDING = 5; private static final int DATA_FLAG_INIT = 0x02; + private static final int DATA_FLAG_CONT = 0x00; private static final int DATA_FLAG_FIN = 0x01; private final MutableInteger headerOffsetRW = new MutableInteger(); private static final String HTTP_TYPE_NAME = "http"; @@ -753,8 +754,6 @@ private void onNetData( final int reserved = data.reserved(); final OctetsFW payload = data.payload(); - int flags = data.flags(); - assert acknowledge <= sequence; assert sequence >= replySeq; @@ -781,7 +780,7 @@ private void onNetData( .build() : EMPTY_OCTETS; - flags = messageDeferred > 0 ? flags & ~DATA_FLAG_INIT : flags; + int flags = messageDeferred > 0 ? 
DATA_FLAG_INIT : DATA_FLAG_INIT | DATA_FLAG_FIN; delegate.doAppData(traceId, authorization, budgetId, reserved, flags, buffer, offset + GRPC_MESSAGE_PADDING, payloadSize, dataEx); } @@ -790,7 +789,7 @@ private void onNetData( messageDeferred -= size; assert messageDeferred >= 0; - flags = messageDeferred > 0 ? flags & ~DATA_FLAG_INIT : flags; + int flags = messageDeferred > 0 ? DATA_FLAG_CONT : DATA_FLAG_FIN; delegate.doAppData(traceId, authorization, budgetId, reserved, flags, buffer, offset, size, EMPTY_OCTETS); diff --git a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcServerFactory.java b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcServerFactory.java index a1f43012bc..89848e73e0 100644 --- a/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcServerFactory.java +++ b/runtime/binding-grpc/src/main/java/io/aklivity/zilla/runtime/binding/grpc/internal/stream/GrpcServerFactory.java @@ -69,6 +69,7 @@ public final class GrpcServerFactory implements GrpcStreamFactory { private static final int GRPC_MESSAGE_PADDING = 5; private static final int DATA_FLAG_INIT = 0x02; + private static final int DATA_FLAG_CONT = 0x00; private static final int DATA_FLAG_FIN = 0x01; private static final int EXPIRING_SIGNAL = 1; private static final String HTTP_TYPE_NAME = "http"; @@ -507,7 +508,6 @@ private void onNetData( final long budgetId = data.budgetId(); final int reserved = data.reserved(); final OctetsFW payload = data.payload(); - int flags = data.flags(); assert acknowledge <= sequence; assert sequence >= initialSeq; @@ -537,7 +537,7 @@ private void onNetData( .deferred(messageDeferred) .build() : EMPTY_OCTETS; - flags = messageDeferred > 0 ? flags & ~DATA_FLAG_INIT : flags; + int flags = messageDeferred > 0 ? 
DATA_FLAG_INIT : DATA_FLAG_INIT | DATA_FLAG_FIN; delegate.doAppData(traceId, authorization, budgetId, reserved, flags, buffer, offset + GRPC_MESSAGE_PADDING, payloadSize, dataEx); } @@ -551,7 +551,7 @@ private void onNetData( messageDeferred -= size; assert messageDeferred >= 0; - flags = messageDeferred > 0 ? flags & ~DATA_FLAG_INIT : flags; + int flags = messageDeferred > 0 ? DATA_FLAG_CONT : DATA_FLAG_FIN; delegate.doAppData(traceId, authorization, budgetId, reserved, flags, buffer, offset, size, EMPTY_OCTETS); From 4ae402bc65f565fda5cdc67a1dccd374b82058e4 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Wed, 20 Sep 2023 01:30:54 +0200 Subject: [PATCH 095/115] Mqtt client implementation (#398) --- .../streams/mqtt/session.subscribe/client.rpt | 11 + .../streams/mqtt/session.subscribe/server.rpt | 11 + .../stream/MqttKafkaSessionFactory.java | 42 +- .../binding/mqtt/internal/MqttFunctions.java | 92 +- .../main/resources/META-INF/zilla/mqtt.idl | 22 +- .../mqtt/schema/mqtt.schema.patch.json | 2 +- .../application/client.sent.abort/client.rpt | 21 + .../application/client.sent.abort/server.rpt | 24 +- .../application/client.sent.close/client.rpt | 23 - .../client.rpt | 43 + .../server.rpt | 31 +- .../client.rpt | 46 + .../server.rpt | 43 + .../connect.maximum.qos.0/client.rpt | 43 + .../connect.maximum.qos.0/server.rpt | 45 + .../client.rpt | 18 +- .../server.rpt | 16 +- .../client.rpt | 51 + .../server.rpt | 53 + .../client.rpt | 45 + .../server.rpt | 48 + .../connect.retain.not.supported/client.rpt | 43 + .../connect.retain.not.supported/server.rpt | 45 + .../client.rpt | 66 +- .../server.rpt | 53 +- .../publish.empty.message/client.rpt | 30 + .../publish.empty.message/server.rpt | 26 + .../publish.empty.retained.message/client.rpt | 30 + .../publish.empty.retained.message/server.rpt | 27 + .../client.rpt | 30 + .../server.rpt | 26 + .../client.rpt | 33 +- .../server.rpt | 26 + .../client.rpt | 49 + .../server.rpt | 53 + .../client.rpt | 30 + .../server.rpt | 26 + 
.../client.rpt | 33 +- .../server.rpt | 26 + .../publish.multiple.messages/client.rpt | 30 + .../publish.multiple.messages/server.rpt | 27 + .../client.rpt | 46 + .../server.rpt | 42 + .../publish.one.message/client.rpt | 30 + .../publish.one.message/server.rpt | 25 + .../client.rpt | 44 + .../server.rpt | 47 + .../client.rpt | 59 + .../server.rpt | 62 + .../client.rpt | 63 + .../server.rpt | 62 + .../application/publish.retained/client.rpt | 30 + .../application/publish.retained/server.rpt | 26 + .../client.rpt | 30 + .../server.rpt | 27 + .../client.rpt | 30 + .../server.rpt | 27 + .../publish.with.user.property/client.rpt | 30 + .../publish.with.user.property/server.rpt | 27 + .../client.rpt | 22 + .../server.rpt | 22 + .../session.client.takeover/client.rpt | 22 + .../session.client.takeover/server.rpt | 22 + .../session.connect.abort/client.rpt | 45 + .../session.connect.abort/server.rpt | 45 + .../client.rpt | 10 +- .../server.rpt | 10 +- .../client.rpt | 12 + .../server.rpt | 13 + .../application/session.connect/client.rpt | 43 + .../application/session.connect/server.rpt | 45 + .../session.exists.clean.start/client.rpt | 22 + .../session.exists.clean.start/server.rpt | 22 + .../client.rpt | 11 + .../server.rpt | 11 + .../client.rpt | 11 + .../server.rpt | 11 + .../session.server.sent.abort/client.rpt | 11 + .../session.server.sent.abort/server.rpt | 11 + .../client.rpt | 11 + .../server.rpt | 11 + .../client.rpt | 11 + .../server.rpt | 11 + .../client.rpt | 11 + .../server.rpt | 11 + .../application/session.subscribe/client.rpt | 17 +- .../application/session.subscribe/server.rpt | 12 + .../client.rpt | 11 + .../server.rpt | 12 + .../client.rpt | 11 + .../server.rpt | 11 + .../client.rpt | 17 +- .../server.rpt | 14 +- .../session.will.message.abort/client.rpt | 11 + .../session.will.message.abort/server.rpt | 11 + .../client.rpt | 11 + .../server.rpt | 11 + .../session.will.message.retain/client.rpt | 15 +- .../session.will.message.retain/server.rpt | 15 +- 
.../client.rpt | 46 + .../server.rpt | 42 + .../client.rpt | 47 +- .../server.rpt | 44 +- .../client.rpt | 46 + .../server.rpt | 43 + .../subscribe.one.message/client.rpt | 46 + .../subscribe.one.message/server.rpt | 43 + .../subscribe.publish.no.local/client.rpt | 51 +- .../subscribe.publish.no.local/server.rpt | 43 + .../client.rpt | 84 +- .../server.rpt | 74 +- .../client.rpt | 83 +- .../server.rpt | 71 +- .../client.rpt | 63 + .../server.rpt | 61 + .../client.rpt | 46 + .../server.rpt | 43 + .../subscribe.receive.message/client.rpt | 49 +- .../subscribe.receive.message/server.rpt | 43 + .../client.rpt | 110 + .../server.rpt | 105 + .../client.rpt | 109 + .../server.rpt | 109 + .../client.rpt | 44 + .../server.rpt | 49 + .../client.rpt | 44 + .../server.rpt | 48 + .../client.rpt | 44 + .../server.rpt | 47 + .../subscribe.retain.as.published/client.rpt | 46 + .../subscribe.retain.as.published/server.rpt | 43 + .../client.rpt | 46 + .../server.rpt | 43 + .../client.rpt | 46 + .../server.rpt | 43 + .../client.rpt | 46 + .../server.rpt | 43 + .../client.rpt | 46 + .../server.rpt | 44 + .../client.rpt | 46 + .../server.rpt | 43 + .../client.rpt | 48 + .../server.rpt | 45 + .../client.rpt | 48 + .../server.rpt | 45 + .../client.rpt | 48 + .../server.rpt | 45 + .../client.rpt | 66 + .../server.rpt | 61 + .../client.rpt | 66 + .../server.rpt | 62 + .../client.rpt | 66 + .../server.rpt | 61 + .../client.rpt | 77 + .../server.rpt | 78 + .../client.rpt | 48 + .../server.rpt | 45 + .../unsubscribe.after.subscribe/client.rpt | 60 + .../unsubscribe.after.subscribe/server.rpt | 57 + .../client.rpt | 65 +- .../server.rpt | 59 + .../client.rpt | 64 + .../server.rpt | 61 + .../client.rpt | 65 + .../server.rpt | 61 + .../client.rpt | 101 + .../server.rpt | 99 + .../network/client.sent.abort/client.rpt | 12 +- .../network/client.sent.abort/server.rpt | 12 +- .../network/client.sent.close/client.rpt | 12 +- .../network/client.sent.close/server.rpt | 12 +- 
.../network/client.sent.reset/client.rpt | 12 +- .../network/client.sent.reset/server.rpt | 12 +- .../client.rpt | 46 + .../server.rpt | 47 + .../connect.non.successful.connack/client.rpt | 37 + .../connect.non.successful.connack/server.rpt | 38 + .../client.rpt | 43 + .../server.rpt | 45 + .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 26 +- .../server.rpt | 6 +- .../client.rpt | 9 - .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 41 + .../server.rpt | 42 + .../streams/network/disconnect/client.rpt | 1 + .../streams/network/disconnect/server.rpt | 1 + .../network/ping.no.pingresp/client.rpt | 42 + .../network/ping.no.pingresp/server.rpt | 43 + .../client.rpt | 46 + .../server.rpt | 47 + .../mqtt/streams/network/ping/client.rpt | 7 +- .../mqtt/streams/network/ping/server.rpt | 7 +- .../network/publish.empty.message/client.rpt | 7 +- .../network/publish.empty.message/server.rpt | 5 +- .../publish.empty.retained.message/client.rpt | 5 +- .../publish.empty.retained.message/server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../publish.multiple.messages/client.rpt | 5 +- .../publish.multiple.messages/server.rpt | 5 +- .../network/publish.one.message/client.rpt | 5 +- .../network/publish.one.message/server.rpt | 5 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../client.rpt | 2 +- .../server.rpt | 2 +- .../network/publish.retained/client.rpt | 5 +- .../network/publish.retained/server.rpt | 5 +- .../client.rpt | 7 +- .../server.rpt | 7 +- .../client.rpt | 7 +- .../server.rpt | 7 +- .../publish.with.user.property/client.rpt | 7 +- .../publish.with.user.property/server.rpt | 7 +- .../client.rpt | 10 +- .../server.rpt | 10 +- .../session.client.takeover/client.rpt | 10 +- .../session.client.takeover/server.rpt | 10 +- .../client.rpt | 6 +- .../server.rpt | 6 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../session.exists.clean.start/client.rpt | 10 +- .../session.exists.clean.start/server.rpt | 10 +- .../client.rpt | 5 +- 
.../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../network/session.subscribe/client.rpt | 7 +- .../network/session.subscribe/server.rpt | 7 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../session.will.message.retain/client.rpt | 13 +- .../session.will.message.retain/server.rpt | 13 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../network/subscribe.one.message/client.rpt | 5 +- .../network/subscribe.one.message/server.rpt | 5 +- .../subscribe.publish.no.local/client.rpt | 5 +- .../subscribe.publish.no.local/server.rpt | 7 +- .../client.rpt | 19 +- .../server.rpt | 19 +- .../client.rpt | 15 +- .../server.rpt | 15 +- .../client.rpt | 9 +- .../server.rpt | 9 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../subscribe.receive.message/client.rpt | 5 +- .../subscribe.receive.message/server.rpt | 5 +- .../client.rpt | 65 + .../server.rpt | 66 + .../client.rpt | 56 + .../server.rpt | 64 + .../subscribe.retain.as.published/client.rpt | 5 +- .../subscribe.retain.as.published/server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 5 +- .../server.rpt | 5 +- .../client.rpt | 9 +- .../server.rpt | 9 +- .../client.rpt | 9 +- .../server.rpt | 9 +- .../client.rpt | 9 +- .../server.rpt | 9 +- .../client.rpt | 52 + .../server.rpt | 53 + .../client.rpt | 5 +- .../server.rpt | 5 +- .../unsubscribe.after.subscribe/client.rpt | 9 +- .../unsubscribe.after.subscribe/server.rpt | 9 +- .../client.rpt | 9 +- .../server.rpt | 9 +- .../client.rpt | 9 +- .../server.rpt | 9 +- .../client.rpt | 63 + .../server.rpt | 65 
+ .../mqtt/internal/MqttFunctionsTest.java | 20 +- .../streams/application/ConnectionIT.java | 62 +- .../mqtt/streams/application/SessionIT.java | 9 + .../mqtt/streams/application/SubscribeIT.java | 26 + .../streams/application/UnsubscribeIT.java | 8 + .../mqtt/streams/network/ConnectionIT.java | 36 + .../binding/mqtt/streams/network/PingIT.java | 18 + .../mqtt/streams/network/SubscribeIT.java | 27 + .../mqtt/streams/network/UnsubscribeIT.java | 9 + .../mqtt/internal/MqttBindingContext.java | 4 +- .../mqtt/internal/MqttConfiguration.java | 41 +- .../mqtt/internal/MqttReasonCodes.java | 6 +- .../internal/stream/MqttClientFactory.java | 4118 ++++++++++++++++- .../internal/stream/MqttServerFactory.java | 719 +-- .../mqtt/internal/MqttConfigurationTest.java | 17 +- .../internal/stream/client/ConnectionIT.java | 115 + .../mqtt/internal/stream/client/PingIT.java | 79 + .../internal/stream/client/PublishIT.java | 137 + .../internal/stream/client/SubscribeIT.java | 315 ++ .../internal/stream/client/UnsubscribeIT.java | 95 + .../stream/{ => server}/ConnectionIT.java | 201 +- .../internal/stream/{ => server}/PingIT.java | 14 +- .../stream/{ => server}/PublishIT.java | 129 +- .../stream/{ => server}/SessionIT.java | 42 +- .../stream/{ => server}/SubscribeIT.java | 130 +- .../stream/{ => server}/UnsubscribeIT.java | 42 +- 328 files changed, 13615 insertions(+), 1323 deletions(-) delete mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt rename incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/{connect.authorize.publish.one.message => connect.delegate.connack.properties}/server.rpt (61%) create mode 100644 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt rename incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/{connect.authorize.publish.one.message => connect.non.successful.connack}/client.rpt (64%) rename incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/{client.sent.close => connect.non.successful.connack}/server.rpt (58%) create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/server.rpt 
create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt create mode 
100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/client.rpt create mode 100644 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/client.rpt create mode 100644 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/server.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/client.rpt create mode 100644 incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/server.rpt create mode 100644 incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java create mode 100644 incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java create mode 100644 incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java create mode 100644 incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java create mode 100644 incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java rename incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/{ => server}/ConnectionIT.java (58%) rename 
incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/{ => server}/PingIT.java (78%) rename incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/{ => server}/PublishIT.java (59%) rename incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/{ => server}/SessionIT.java (75%) rename incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/{ => server}/SubscribeIT.java (61%) rename incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/{ => server}/UnsubscribeIT.java (69%) diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt index d5fa2ae743..89d347cb45 100644 --- a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(1) + .qosMax(0) + .packetSizeMax(9216) + .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD") + .clientId("client-1") + .build() + .build()} + connected read zilla:data.empty diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt index 4e745cf720..931e03badf 100644 --- 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt +++ b/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt @@ -27,6 +27,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .expiry(1) + .qosMax(0) + .packetSizeMax(9216) + .capabilities("RETAIN", "SUBSCRIPTION_IDS", "WILDCARD") + .clientId("client-1") + .build() + .build()} + connected write zilla:data.empty diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java index d5f4c8ccda..50492e1efb 100644 --- a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java +++ b/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java @@ -84,6 +84,7 @@ import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttResetExFW; +import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttServerCapabilities; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSessionBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.MqttSessionDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.kafka.internal.types.stream.ResetFW; @@ -127,6 +128,13 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private static final int 
SIGNAL_EXPIRE_SESSION = 3; private static final int SIZE_OF_UUID = 38; private static final AtomicInteger CONTEXT_COUNTER = new AtomicInteger(0); + private static final int RETAIN_AVAILABLE_MASK = 1 << MqttServerCapabilities.RETAIN.value(); + private static final int WILDCARD_AVAILABLE_MASK = 1 << MqttServerCapabilities.WILDCARD.value(); + private static final int SUBSCRIPTION_IDS_AVAILABLE_MASK = 1 << MqttServerCapabilities.SUBSCRIPTION_IDS.value(); + private static final int SHARED_SUBSCRIPTIONS_AVAILABLE_MASK = 1 << MqttServerCapabilities.SHARED_SUBSCRIPTIONS.value(); + private static final byte MQTT_KAFKA_MAX_QOS = 0; + private static final int MQTT_KAFKA_CAPABILITIES = RETAIN_AVAILABLE_MASK | WILDCARD_AVAILABLE_MASK | + SUBSCRIPTION_IDS_AVAILABLE_MASK; private final BeginFW beginRO = new BeginFW(); private final DataFW dataRO = new DataFW(); @@ -177,6 +185,7 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory private final MutableDirectBuffer willKeyBuffer; private final MutableDirectBuffer sessionSignalKeyBuffer; private final MutableDirectBuffer sessionExtBuffer; + private final int packetSizeMax; private final BufferPool bufferPool; private final BindingHandler streamFactory; private final Signaler signaler; @@ -219,6 +228,7 @@ public MqttKafkaSessionFactory( this.willKeyBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.sessionSignalKeyBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.sessionExtBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.packetSizeMax = writeBuffer.capacity(); this.bufferPool = context.bufferPool(); this.helper = new MqttKafkaHeaderHelper(); this.streamFactory = context.streamFactory(); @@ -2469,7 +2479,18 @@ private void onKafkaBegin( if (isSetWillFlag(delegate.sessionFlags)) { - delegate.doMqttBegin(traceId, authorization, affinity, EMPTY_OCTETS); + Flyweight mqttBeginEx = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, 
sessionExtBuffer.capacity()) + .typeId(mqttTypeId) + .session(sessionBuilder -> sessionBuilder + .flags(delegate.sessionFlags) + .expiry((int) TimeUnit.MILLISECONDS.toSeconds(delegate.sessionExpiryMillis)) + .qosMax(MQTT_KAFKA_MAX_QOS) + .packetSizeMax(packetSizeMax) + .capabilities(MQTT_KAFKA_CAPABILITIES) + .clientId(delegate.clientId)) + .build(); + + delegate.doMqttBegin(traceId, authorization, affinity, mqttBeginEx); } doKafkaWindow(traceId, authorization, 0, 0); } @@ -3178,19 +3199,22 @@ private void onKafkaBegin( sessionExpiryMillisInRange = kafkaGroupBeginEx.timeout(); } - Flyweight mqttBeginEx = EMPTY_OCTETS; if (delegate.sessionExpiryMillis != sessionExpiryMillisInRange) { delegate.sessionExpiryMillis = sessionExpiryMillisInRange; - mqttBeginEx = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) - .typeId(mqttTypeId) - .session(sessionBuilder -> sessionBuilder - .flags(delegate.sessionFlags) - .expiry((int) TimeUnit.MILLISECONDS.toSeconds(delegate.sessionExpiryMillis)) - .clientId(delegate.clientId)) - .build(); } + Flyweight mqttBeginEx = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) + .typeId(mqttTypeId) + .session(sessionBuilder -> sessionBuilder + .flags(delegate.sessionFlags) + .expiry((int) TimeUnit.MILLISECONDS.toSeconds(delegate.sessionExpiryMillis)) + .qosMax(MQTT_KAFKA_MAX_QOS) + .packetSizeMax(packetSizeMax) + .capabilities(MQTT_KAFKA_CAPABILITIES) + .clientId(delegate.clientId)) + .build(); + delegate.doMqttBegin(traceId, authorization, affinity, mqttBeginEx); doKafkaWindow(traceId, authorization, 0, 0, 0); } diff --git a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java index cf0d647f66..8653be2e7f 100644 --- a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java +++ 
b/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java @@ -55,6 +55,7 @@ import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttPublishBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttPublishDataExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttResetExFW; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttServerCapabilities; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSessionBeginExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSessionDataExFW; import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttSessionDataKind; @@ -221,6 +222,30 @@ public MqttSessionBeginExBuilder expiry( return this; } + public MqttSessionBeginExBuilder qosMax( + int qosMax) + { + sessionBeginExRW.qosMax(qosMax); + return this; + } + + public MqttSessionBeginExBuilder packetSizeMax( + int packetSizeMax) + { + sessionBeginExRW.packetSizeMax(packetSizeMax); + return this; + } + + public MqttSessionBeginExBuilder capabilities( + String... capabilityNames) + { + int capabilities = Arrays.stream(capabilityNames) + .mapToInt(flag -> 1 << MqttServerCapabilities.valueOf(flag).value()) + .reduce(0, (a, b) -> a | b); + sessionBeginExRW.capabilities(capabilities); + return this; + } + public MqttSessionBeginExBuilder flags( String... 
flagNames) { @@ -690,6 +715,13 @@ public MqttResetExBuilder serverRef( return this; } + public MqttResetExBuilder reasonCode( + int reasonCode) + { + resetExRW.reasonCode(reasonCode); + return this; + } + public byte[] build() { final MqttResetExFW resetEx = resetExRW.build(); @@ -724,6 +756,15 @@ public MqttSessionStateBuilder subscription( return this; } + public MqttSessionStateBuilder subscription( + String pattern, + int id, + int reasonCode) + { + sessionStateRW.subscriptionsItem(f -> f.subscriptionId(id).reasonCode(reasonCode).pattern(pattern)); + return this; + } + public MqttSessionStateBuilder subscription( String pattern, int id, @@ -1274,6 +1315,9 @@ public final class MqttSessionBeginExMatcherBuilder private String16FW clientId; private Integer expiry; private Integer flags; + private Integer capabilities; + private Integer qosMax; + private Integer packetSizeMax; private MqttSessionBeginExMatcherBuilder() { @@ -1293,6 +1337,29 @@ public MqttSessionBeginExMatcherBuilder expiry( return this; } + public MqttSessionBeginExMatcherBuilder qosMax( + int qosMax) + { + this.qosMax = qosMax; + return this; + } + + public MqttSessionBeginExMatcherBuilder capabilities( + String... capabilityNames) + { + this.capabilities = Arrays.stream(capabilityNames) + .mapToInt(flag -> 1 << MqttServerCapabilities.valueOf(flag).value()) + .reduce(0, (a, b) -> a | b); + return this; + } + + public MqttSessionBeginExMatcherBuilder packetSizeMax( + int packetSizeMax) + { + this.packetSizeMax = packetSizeMax; + return this; + } + public MqttSessionBeginExMatcherBuilder flags( String... 
flagNames) { @@ -1311,9 +1378,12 @@ private boolean match( MqttBeginExFW beginEx) { final MqttSessionBeginExFW sessionBeginEx = beginEx.session(); - return matchClientId(sessionBeginEx) && + return matchFlags(sessionBeginEx) && + matchClientId(sessionBeginEx) && matchExpiry(sessionBeginEx) && - matchFlags(sessionBeginEx); + matchQosMax(sessionBeginEx) && + matchPacketSizeMax(sessionBeginEx) && + matchCapabilities(sessionBeginEx); } private boolean matchClientId( @@ -1322,6 +1392,24 @@ private boolean matchClientId( return clientId == null || clientId.equals(sessionBeginEx.clientId()); } + private boolean matchQosMax( + final MqttSessionBeginExFW sessionBeginEx) + { + return qosMax == null || qosMax == sessionBeginEx.qosMax(); + } + + private boolean matchPacketSizeMax( + final MqttSessionBeginExFW sessionBeginEx) + { + return packetSizeMax == null || packetSizeMax == sessionBeginEx.packetSizeMax(); + } + + private boolean matchCapabilities( + final MqttSessionBeginExFW sessionBeginEx) + { + return capabilities == null || capabilities == sessionBeginEx.capabilities(); + } + private boolean matchExpiry( final MqttSessionBeginExFW sessionBeginEx) { diff --git a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl index 7f7395edf5..dc8bb68ca1 100644 --- a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl +++ b/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl @@ -44,7 +44,8 @@ scope mqtt enum MqttPayloadFormat { BINARY, - TEXT + TEXT, + NONE } enum MqttTime (int64) @@ -69,6 +70,7 @@ scope mqtt uint32 subscriptionId = 0; uint8 qos = 0; uint8 flags = 0; + uint8 reasonCode = 0; string16 pattern; } @@ -116,7 +118,7 @@ scope mqtt uint8 flags = 0; int32 expiryInterval = -1; string16 contentType = null; - MqttPayloadFormat format = BINARY; + MqttPayloadFormat format = NONE; string16 responseTopic = null; string16 lifetimeId = null; string16 
willId = null; @@ -141,10 +143,21 @@ scope mqtt case 2: mqtt::stream::MqttSessionBeginEx session; } + enum MqttServerCapabilities (uint8) + { + RETAIN (0), + WILDCARD (1), + SUBSCRIPTION_IDS (2), + SHARED_SUBSCRIPTIONS (3) + } + struct MqttSessionBeginEx { uint8 flags = 0; int32 expiry = 0; + uint16 qosMax = 0; + uint32 packetSizeMax = 0; + uint8 capabilities = 0; string16 clientId; } @@ -177,7 +190,7 @@ scope mqtt varuint32[] subscriptionIds; int32 expiryInterval = -1; string16 contentType = null; - MqttPayloadFormat format = BINARY; + MqttPayloadFormat format = NONE; string16 responseTopic = null; MqttBinary correlation; MqttUserProperty[] properties; @@ -190,7 +203,7 @@ scope mqtt uint8 flags = 0; int32 expiryInterval = -1; string16 contentType = null; - MqttPayloadFormat format = BINARY; + MqttPayloadFormat format = NONE; string16 responseTopic = null; MqttBinary correlation; MqttUserProperty[] properties; @@ -210,6 +223,7 @@ scope mqtt struct MqttResetEx extends core::stream::Extension { string16 serverRef = null; + uint8 reasonCode = 0; } union MqttFlushEx switch (uint8) extends core::stream::Extension diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json index c20d1c71e6..54d4e16853 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json @@ -29,7 +29,7 @@ }, "kind": { - "enum": [ "server" ] + "enum": [ "server", "client" ] }, "vault": false, "options": diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt index 42bd77a5c9..b771dafd37 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt @@ -18,6 +18,27 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected +read zilla:data.empty + write abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt index 128b64c61c..b5555172b2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt @@ -17,9 +17,31 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" - accepted +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + 
.qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected +write zilla:data.empty +write flush + + read aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/client.rpt deleted file mode 100644 index 65aa4b1e76..0000000000 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/client.rpt +++ /dev/null @@ -1,23 +0,0 @@ -# -# Copyright 2021-2023 Aklivity Inc. -# -# Aklivity licenses this file to you under the Apache License, -# version 2.0 (the "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -connect "zilla://streams/app0" - option zilla:window 8192 - option zilla:transmission "duplex" - -connected - -write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt new file mode 100644 index 0000000000..88889bceaa --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt @@ -0,0 +1,43 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .expiry(0) + .qosMax(0) + .packetSizeMax(50) + .clientId("client-1") + .build() + .build()} + +connected + +read zilla:data.empty + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/server.rpt similarity index 61% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/server.rpt rename to incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/server.rpt index 2555639ec8..12aed590e7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/server.rpt @@ -17,30 +17,29 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" - option zilla:authorization 1L accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) - .publish() + .session() + .flags("CLEAN_START") .clientId("client") - .topic("sensor/one") .build() .build()} -connected - -read zilla:data.ext ${mqtt:matchDataEx() - .typeId(zilla:id("mqtt")) - .publish() - .qos("AT_MOST_ONCE") - .expiryInterval(15) - 
.contentType("message") - .format("TEXT") - .responseTopic("sensor/one") - .correlation("info") +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .expiry(0) + .qosMax(0) + .packetSizeMax(50) + .clientId("client-1") .build() - .build()} + .build()} + +connected -read "message" +write zilla:data.empty +write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt index 6add586d14..923890e1d3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt @@ -18,6 +18,52 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(50) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option 
zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt index fe0b5d0540..a8697a4866 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt @@ -18,6 +18,49 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(50) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt new file mode 100644 index 
0000000000..c8ed292659 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt @@ -0,0 +1,43 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(0) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt new file mode 100644 index 0000000000..7492f9ab3e --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt @@ -0,0 +1,45 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(0) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/client.rpt similarity index 64% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/client.rpt rename to incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/client.rpt index fde90fdb5e..eaf7efbcfa 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.authorize.publish.one.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/client.rpt @@ -17,28 +17,20 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" - option zilla:authorization 1L write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) - .publish() + .session() + .flags("CLEAN_START") .clientId("client") - .topic("sensor/one") .build() .build()} connected -write zilla:data.ext ${mqtt:dataEx() +read zilla:reset.ext ${mqtt:resetEx() .typeId(zilla:id("mqtt")) - .publish() - .qos("AT_MOST_ONCE") - .expiryInterval(15) - .contentType("message") - .format("TEXT") - .responseTopic("sensor/one") - .correlation("info") - .build() + .reasonCode(132) .build()} -write "message" +write aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/server.rpt similarity index 58% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/server.rpt rename to incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/server.rpt index edb5fde46f..6e4e8824ed 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.close/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/server.rpt @@ -20,6 +20,20 @@ accept "zilla://streams/app0" accepted +read zilla:begin.ext ${mqtt:matchBeginEx() + 
.typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + connected -read closed +write zilla:reset.ext ${mqtt:resetEx() + .typeId(zilla:id("mqtt")) + .reasonCode(132) + .build()} +write flush + +read abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt new file mode 100644 index 0000000000..365e9bb357 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt @@ -0,0 +1,51 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + +read zilla:reset.ext ${mqtt:resetEx() + .typeId(zilla:id("mqtt")) + .reasonCode(129) + .build()} + +write aborted +read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt new file mode 100644 index 0000000000..e3b9ad7f5f --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt @@ -0,0 +1,53 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty + +write zilla:reset.ext ${mqtt:resetEx() + .typeId(zilla:id("mqtt")) + .reasonCode(129) + .build()} +write flush + +read abort +write close \ No newline at end of file diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt new file mode 100644 index 0000000000..4902ad1d9b --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt @@ -0,0 +1,45 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START", "WILL") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write close +read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt new file mode 100644 index 0000000000..bac2170456 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt @@ -0,0 +1,48 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START", "WILL") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read closed +write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt new file mode 100644 index 0000000000..dc21bb3d5d --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt @@ -0,0 +1,43 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt new file mode 100644 index 0000000000..f94abcde9e --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt @@ -0,0 +1,45 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt index bc58168772..c8b839d1fe 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt @@ -19,31 +19,74 @@ connect "zilla://streams/app0" option zilla:transmission "duplex" write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) - .subscribe() - .clientId("client") - .filter("sensor/one", 1) - .build() + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() .build()} connected -read zilla:data.ext ${mqtt:matchDataEx() +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + 
.kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE + +write await RECEIVED_SUBSCRIBE_STREAM_OPEN +write await SENT_PUBLISH_DATA +write close +read closed +read notify RECEIVE_SUBSCRIBE_CLOSED + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() - .topic("sensor/one") - .subscriptionId(1) - .format("TEXT") + .clientId("client") + .filter("sensor/one", 1) .build() .build()} -read "message" +connected +read notify RECEIVED_SUBSCRIBE_STREAM_OPEN + +write await RECEIVE_SUBSCRIBE_CLOSED write close read closed -write notify SUBSCRIBE_CLOSED -connect await SUBSCRIBE_CLOSED +connect await RECEIVED_SESSION_STATE "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" @@ -66,6 +109,7 @@ write zilla:data.ext ${mqtt:dataEx() write "message" write flush +write notify SENT_PUBLISH_DATA write close read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt index cc4584ba3f..367dc5d60f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt @@ -22,25 +22,58 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) - .subscribe() - .clientId("client") - .filter("sensor/one", 
1) - .build() + .session() + .flags("CLEAN_START") + .clientId("client") + .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected -write zilla:data.ext ${mqtt:dataEx() +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read closed +write close + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .subscribe() - .topic("sensor/one") - .subscriptionId(1) - .format("TEXT") + .clientId("client") + .filter("sensor/one", 1) .build() .build()} -write "message" -write flush +connected read closed write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt index c3373f30a9..9aff694e50 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + 
.flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt index 3e2f7984b9..bda65ca1bd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt @@ -18,6 +18,32 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + accepted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt index 
59a994fa9e..f72e494334 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt index e5234d60ca..c4e8bd3cbf 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt @@ -19,6 +19,33 @@ accept "zilla://streams/app0" option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + 
.clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt index 150bd479f6..bd3c03ccb5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt index 7700300aff..1985238c24 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt @@ -18,6 +18,32 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + accepted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt index 152afdd8b0..78c686fb6e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option 
zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() @@ -44,7 +74,8 @@ write zilla:data.ext ${mqtt:dataEx() write "message3" -connect "zilla://streams/app0" +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt index 1ee6c4cd19..5a00d9a593 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt @@ -18,6 +18,32 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + 
.typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + accepted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt index e4526a2244..17b4d208ce 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() @@ -44,3 +74,22 @@ write zilla:data.ext ${mqtt:dataEx() write "message2" + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option 
zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client2") + .build() + .build()} + +connected + +read zilla:data.empty + +write abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt index c394626fd3..af8fb2059f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt @@ -18,6 +18,32 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + accepted @@ -47,3 +73,30 @@ read zilla:data.ext ${mqtt:matchDataEx() read "message2" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client2") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", 
"SHARED_SUBSCRIPTIONS") + .clientId("client2") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt index ea48bab788..8d8a2ff12c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt index 83562f1d64..dbe9ebc13d 
100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt @@ -18,6 +18,32 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + accepted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt index f401b1ea4c..db91f65323 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + 
.qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() @@ -36,7 +66,8 @@ write zilla:data.ext ${mqtt:dataEx() write "message1" -connect "zilla://streams/app0" +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt index 0ea4e6e20e..2f9653d000 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt @@ -18,6 +18,32 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + accepted diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt index 256071880c..5006c59363 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt index 79730b171b..14fc69b797 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt @@ -18,6 +18,33 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt index 80e57ce0fb..4cc383aeea 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt @@ -18,6 +18,52 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + 
.build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/two", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/two", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt index d9850e07ee..099139ff74 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt @@ -18,6 +18,48 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read 
${mqtt:session() + .subscription("sensor/two", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/two", 1) + .build()} +write flush + accepted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt index c202e5fb9d..f15914b23e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt index 3bc19525ae..946782638d 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt @@ -18,6 +18,31 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush accepted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/client.rpt new file mode 100644 index 0000000000..f02246b950 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/client.rpt @@ -0,0 +1,44 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(9216) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/server.rpt new file mode 100644 index 0000000000..6543a0413b --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/server.rpt @@ -0,0 +1,47 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(9216) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt new file mode 100644 index 0000000000..df76616097 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt @@ -0,0 +1,59 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(0) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .build() + .build()} + +connected diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt new file mode 100644 index 0000000000..58cb238a7b --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt @@ -0,0 +1,62 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(0) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read aborted + + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .build() + .build()} + +connected + +read aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt new file mode 100644 index 0000000000..c7fbd6eb15 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt @@ -0,0 +1,63 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + +write abort + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .build() + .build()} + +connected + +write abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt new file mode 100644 index 0000000000..6a569d5c82 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt @@ -0,0 +1,62 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License.
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read aborted + + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .build() + .build()} + +connected + +read aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt index 6862e3d7f4..9212801e9c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + 
.flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt index 2eca44d740..5b3acac928 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt @@ -18,6 +18,32 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + accepted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt index 52c7556678..7375f857b6 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt index 5bbfea710f..462f3d08af 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt @@ -18,6 +18,33 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt index eca94553b4..c0d527d820 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", 
"WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt index 2912229271..84b66de991 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt @@ -18,6 +18,33 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt index 3eed4fccb5..c52c4f3dea 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt @@ -18,6 +18,36 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt index 22654efe10..d3925f6b6e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt @@ -18,6 +18,33 @@ accept 
"zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("755452d5-e2ef-4113-b9c6-2f53de96fd76") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt index ca638f0b15..cb2fc4835a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read zilla:data.empty @@ -79,6 +90,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", 
"WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read ${mqtt:session() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt index 93307a50fe..315c6dad7a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt @@ -27,6 +27,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write zilla:data.empty @@ -74,6 +85,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write ${mqtt:session() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt index 0b68e3386c..426e3aa983 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write zilla:data.ext ${mqtt:dataEx() @@ -80,6 +91,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read ${mqtt:session() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt index 3f6dfd1e96..45c66b0a18 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt @@ -27,6 +27,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + 
.clientId("client") + .build() + .build()} + connected write zilla:data.empty @@ -78,6 +89,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write notify CLIENT_TAKEOVER diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt new file mode 100644 index 0000000000..d3980ce8ec --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt @@ -0,0 +1,45 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +read aborted + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt new file mode 100644 index 0000000000..62685691f2 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt @@ -0,0 +1,45 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt index 27635e8ebe..30750fea4a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt @@ -27,12 +27,16 @@ write zilla:begin.ext ${mqtt:beginEx() .build()} read zilla:begin.ext ${mqtt:matchBeginEx() - .typeId(zilla:id("mqtt")) - .session() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") .expiry(30) + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client-1") .build() - .build()} + .build()} connected diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt index 0ecc9f30b8..1357014205 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt @@ -29,12 +29,16 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build()} write zilla:begin.ext ${mqtt:beginEx() - .typeId(zilla:id("mqtt")) - .session() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") .expiry(30) + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client-1") .build() - .build()} + .build()} connected diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt index 28f707919e..66bf8e17ec 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt @@ -25,6 +25,18 @@ write zilla:begin.ext ${mqtt:beginEx() .clientId("one") .build() .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("one") + .build() + .build()} + connected read zilla:data.empty diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt index e4da874660..d6679679de 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt @@ -27,6 +27,19 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .clientId("one") .build() .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .expiry(1) + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("one") + .build() + .build()} + connected write zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt new file mode 100644 index 0000000000..a8cb8971ee --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt @@ -0,0 +1,43 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt new file mode 100644 index 0000000000..62685691f2 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt @@ -0,0 +1,45 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt index 7bf3e9eb1d..71958940b8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read zilla:data.empty @@ -84,6 +95,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", 
"SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read ${mqtt:session() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt index 4161f89687..2110d50b65 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt @@ -27,6 +27,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write zilla:data.empty @@ -78,6 +89,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write ${mqtt:session() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt index 19ae2a4dc7..08ea0b183e 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt index dec7e5846d..4e3e92d69f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt @@ -27,6 +27,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt index 20b8a30d30..90137a930e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read zilla:reset.ext ${mqtt:resetEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt index f64d01cffc..c14f65f6a6 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt @@ -27,6 +27,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write zilla:reset.ext ${mqtt:resetEx() diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt index bf293f83a1..b76c68e4a6 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt index f03fc943aa..8fcf49a343 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt @@ -27,6 +27,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write abort diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt index 9ea94267e0..4feceda522 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt index 0e04154821..14ceb06745 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt @@ -27,6 +27,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + 
.clientId("client") + .build() + .build()} + connected write zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt index 6930722b7f..5f7bc09f52 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt index 90796c2368..b58de36912 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt @@ -27,6 +27,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + 
.capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt index 998bf4e10f..6e5c555867 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt index faab527cfa..d4b61821c8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt @@ -27,6 +27,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + 
.typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt index 578940e7d5..53c8246fc5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt @@ -21,6 +21,18 @@ connect "zilla://streams/app0" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") .clientId("client") .build() .build()} @@ -44,8 +56,11 @@ write flush read ${mqtt:session() .subscription("sensor/one", 1) .build()} +read notify RECEIVED_SESSION_STATE -connect "zilla://streams/app0" + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt index c5027bb198..0e77f8fadc 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt @@ -23,10 +23,22 @@ accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() + .flags("CLEAN_START") .clientId("client") .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt index 310b2003b6..1571dfd191 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt index 28543eda72..620a720865 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt @@ -26,6 +26,18 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .clientId("client") .build() .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt index 32f715dbcf..cfc946a237 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected read zilla:data.empty diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt index d85d49fe20..760c4355f6 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt @@ -28,6 +28,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write zilla:data.empty diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt index 901faadc4f..b7155f5111 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt @@ -25,6 +25,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", 
"SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected @@ -44,16 +55,14 @@ write ${mqtt:session() read ${mqtt:session() .subscription("sensor/one", 1) .build()} +read notify RECEIVED_SESSION_STATE -write notify SUBSCRIBED read ${mqtt:session() .build()} -write notify SESSION_READY - -connect await SESSION_READY +connect await RECEIVED_SESSION_STATE "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt index 9c281984f5..9d4cb9d6bd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt @@ -28,6 +28,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + connected write zilla:data.empty @@ -49,7 +60,7 @@ write ${mqtt:session() .build()} write flush -read await SUBSCRIBED +write await RECEIVED_SUBSCRIBE_OPEN write ${mqtt:session() .build()} write flush @@ -65,6 +76,7 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build()} connected +read notify RECEIVED_SUBSCRIBE_OPEN read advised zilla:flush ${mqtt:flushEx() .typeId(zilla:id("mqtt")) diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt index 51937e1f9e..84ccb407cc 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt @@ -26,6 +26,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL", "CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("one") + .build() + .build()} + connected write zilla:data.ext ${mqtt:dataEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt index fb26a8d19a..928eb52a5b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt @@ -28,6 +28,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL", "CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("one") + .build() + .build()} + 
connected diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt index aed7e6d713..97ccd5bbf8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt @@ -26,6 +26,17 @@ write zilla:begin.ext ${mqtt:beginEx() .build() .build()} +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL", "CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("one") + .build() + .build()} + connected write zilla:data.ext ${mqtt:dataEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt index c570dab6f8..d434f0d92d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt @@ -28,6 +28,17 @@ read zilla:begin.ext ${mqtt:matchBeginEx() .build() .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL", "CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + 
.capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("one") + .build() + .build()} + connected diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt index 8129b052e5..d28140fa3b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt @@ -16,11 +16,12 @@ connect "zilla://streams/app0" option zilla:window 8192 - option zilla:transmission "duplex" + option zilla:transmission "half-duplex" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .session() + .flags("CLEAN_START", "WILL") .clientId("one") .build() .build()} @@ -42,4 +43,16 @@ write ${mqtt:will() .build()} write flush +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL", "CLEAN_START") + .qosMax(0) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS") + .clientId("one") + .build() + .build()} + read zilla:data.empty + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt index 8bf27cb120..ceb3f834aa 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt @@ -16,13 +16,13 @@ accept "zilla://streams/app0" option zilla:window 8192 - option zilla:transmission "duplex" - + option zilla:transmission "half-duplex" accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .session() + .flags("CLEAN_START", "WILL") .clientId("one") .build() .build()} @@ -44,5 +44,16 @@ read ${mqtt:will() .payload("client one session expired") .build()} +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("WILL", "CLEAN_START") + .qosMax(0) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS") + .clientId("one") + .build() + .build()} + write zilla:data.empty write flush diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt index 75131094ab..2b7df6d001 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt @@ -18,6 +18,52 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + 
.clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt index a598da3468..68efdb3ecf 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt @@ -20,6 +20,48 @@ accept "zilla://streams/app0" accepted +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read 
${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build()} +write flush + +accepted + read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt index 8bd244254b..bf8dc91ac2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt @@ -18,6 +18,52 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await 
RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() @@ -32,7 +78,6 @@ read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .subscribe() .topic("sensor/one") - .flags("RETAIN") .subscriptionId(1) .expiryInterval(15) .contentType("message") diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt index f5bfae2423..2b5798a9b2 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt @@ -18,6 +18,49 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + 
.subscription("sensor/one", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() @@ -34,7 +77,6 @@ write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .subscribe() .topic("sensor/one") - .flags("RETAIN") .subscriptionId(1) .expiryInterval(15) .contentType("message") diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt index 6f76abc1cf..30d28313a9 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt @@ -18,6 +18,52 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + 
option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt index 857c759faa..3b6116c818 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt @@ -18,6 +18,49 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt index 0bcad34c48..b45659cb77 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt @@ -18,6 +18,52 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt index 4a5cdf05c0..50a3045682 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt @@ -18,6 +18,49 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt index 1fd3cd63f3..c0bc2759ca 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt @@ -18,6 +18,52 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + 
.typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL") + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL") + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() @@ -27,6 +73,7 @@ write zilla:begin.ext ${mqtt:beginEx() .build()} connected +write notify RECEIVED_SUBSCRIBE_OPEN read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) @@ -38,10 +85,8 @@ read zilla:data.ext ${mqtt:matchDataEx() .build()} read "message2" -write notify SUBSCRIBED - -connect await SUBSCRIBED +connect await RECEIVED_SUBSCRIBE_OPEN "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt index ac3e52e227..cddcae8682 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt @@ -18,6 +18,49 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL") + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL") + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt index fb558b094f..53da0e66b6 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt @@ -18,10 +18,40 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + 
.session() + .flags("CLEAN_START") + .clientId("client-1") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client-1") + .build() + .build()} + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() - .clientId("client2") + .clientId("client-1") .topic("sensor/one") .build() .build()} @@ -32,16 +62,62 @@ write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() .flags("RETAIN") + .format("TEXT") .build() .build()} write "message" write flush +write notify SENT_RETAIN_DATA + + +connect await SENT_RETAIN_DATA + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client-2") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client-2") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE2 -write notify PUBLISHED -connect await PUBLISHED +connect await RECEIVED_SESSION_STATE2 "zilla://streams/app0" option zilla:window 8192 option 
zilla:transmission "duplex" @@ -49,7 +125,7 @@ connect await PUBLISHED write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() - .clientId("client1") + .clientId("client-2") .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt index aefa13a75d..755712b684 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt @@ -18,13 +18,39 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client-1") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client-1") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .publish() - .clientId("client2") + .clientId("client-1") .topic("sensor/one") .build() .build()} @@ -35,17 +61,61 @@ read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() .flags("RETAIN") + .format("TEXT") .build() .build()} read "message" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + 
.flags("CLEAN_START") + .clientId("client-2") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client-2") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + accepted read zilla:begin.ext ${mqtt:matchBeginEx() .typeId(zilla:id("mqtt")) .subscribe() - .clientId("client1") + .clientId("client-2") .filter("sensor/one", 1, "AT_MOST_ONCE") .build() .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt index 582c34281c..3d6019dfe0 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt @@ -18,6 +18,37 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client1") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", 
"WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client1") + .build() + .build()} + + +connected + +read zilla:data.empty +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .publish() @@ -32,14 +63,61 @@ write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .publish() .flags("RETAIN") + .format("TEXT") .build() .build()} write "message" write flush -write notify RETAIN_PUBLISHED +write notify SENT_RETAIN_DATA + +connect await SENT_RETAIN_DATA + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client2") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client2") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build()} +read notify RECEIVED_SESSION_STATE2 + -connect await RETAIN_PUBLISHED +connect await RECEIVED_SESSION_STATE2 "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" @@ -58,7 +136,6 @@ read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .subscribe() .topic("sensor/one") - .flags("RETAIN") .subscriptionId(1) .format("TEXT") .build() diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt index 097b64bef3..2c5e931635 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt @@ -18,6 +18,32 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client1") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client1") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + accepted @@ -35,12 +61,56 @@ read zilla:data.ext ${mqtt:matchDataEx() .typeId(zilla:id("mqtt")) .publish() .flags("RETAIN") + .format("TEXT") .build() .build()} read "message" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client2") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client2") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + 
.typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() @@ -57,7 +127,6 @@ write zilla:data.ext ${mqtt:dataEx() .typeId(zilla:id("mqtt")) .subscribe() .topic("sensor/one") - .flags("RETAIN") .subscriptionId(1) .format("TEXT") .build() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt index e7f855d63f..7fe6c75d40 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt @@ -18,6 +18,69 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/+/#", 1) + 
.build()} +write flush + +read ${mqtt:session() + .subscription("sensor/+/#", 1) + .build()} +read notify RECEIVED_SESSION_STATE + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/+/#", 1) + .subscription("sensor/+/1", 2) + .build()} + +read ${mqtt:session() + .subscription("sensor/+/#", 1) + .subscription("sensor/+/1", 2) + .build()} + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt index fcb3fbf8b0..c3f3bed38e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt @@ -18,6 +18,67 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext 
${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/+/#", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/+/#", 1) + .build()} +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/+/#", 1) + .subscription("sensor/+/1", 2) + .build()} + +write ${mqtt:session() + .subscription("sensor/+/#", 1) + .subscription("sensor/+/1", 2) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt index 74d45ce695..25304564a4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt @@ -18,6 +18,52 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + 
+write ${mqtt:session() + .subscription("sensor/+", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/+", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt index a13fc5d8dc..3a88d85ead 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt @@ -18,6 +18,49 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/+", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/+", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt index ae9fbea489..01678ad7a6 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt @@ -18,6 +18,52 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() @@ -39,7 +85,8 @@ read zilla:data.ext ${mqtt:matchDataEx() read "message" -connect "zilla://streams/app0" +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt index db5c3252fd..7da984932b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt @@ -18,6 +18,49 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt new file mode 100644 index 0000000000..a1974837f0 --- /dev/null +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt @@ -0,0 +1,110 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +read 
zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} +read "message" + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} +write "message" +write flush + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt new file mode 100644 index 0000000000..7da984932b --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt @@ -0,0 +1,105 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} +write "message" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .publish() + .format("TEXT") + .build() + .build()} +read "message" + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt new file mode 100644 index 0000000000..9d460c24da --- /dev/null +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt @@ -0,0 +1,109 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 2) + .build()} +read notify RECEIVED_NEW_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + 
.typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE") + .build() + .build()} + +connected + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} +read "message" + +write await RECEIVED_NEW_SESSION_STATE +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1) + .filter("sensor/two", 2) + .build() + .build()} + +read zilla:data.ext ${mqtt:matchDataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} +read "message" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt new file mode 100644 index 0000000000..729f9790fc --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt @@ -0,0 +1,109 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 2) + .build()} + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1) + .build() + .build()} + +connected + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/one") + .subscriptionId(1) + .format("TEXT") + .build() + .build()} + +write "message" +write flush + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/one", 1) + .filter("sensor/two", 2) + .build() + .build()} + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .topic("sensor/two") + .subscriptionId(2) + .format("TEXT") + .build() + .build()} +write "message" +write flush \ No newline at end of file diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt new file mode 100644 index 0000000000..23f920f17a --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt @@ -0,0 +1,44 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt new file mode 100644 index 0000000000..3167305bbe --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt @@ -0,0 +1,49 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read aborted + + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt new file mode 100644 index 0000000000..4a57d637db --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt @@ -0,0 +1,44 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt new file mode 100644 index 0000000000..dbb15859eb --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt @@ -0,0 +1,48 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read aborted + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt new file mode 100644 index 0000000000..b3ba629994 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt @@ -0,0 +1,44 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt new file mode 100644 index 0000000000..fffe14919d --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt @@ -0,0 +1,47 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt index 63d6ae8673..fda2157214 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt @@ -18,6 +18,52 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", 
"SEND_RETAINED", "RETAIN_AS_PUBLISHED") + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED", "RETAIN_AS_PUBLISHED") + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt index 562ad9c206..c2e06f5318 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt @@ -18,6 +18,49 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED", "RETAIN_AS_PUBLISHED") + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1, "AT_MOST_ONCE", 
"SEND_RETAINED", "RETAIN_AS_PUBLISHED") + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt index fd61ba9ff8..1894b9768e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt @@ -15,6 +15,52 @@ # connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/#", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/#", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt index 496fea1a0c..8c743bf6bd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt @@ -18,6 +18,49 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/#", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/#", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt index ed61885ebf..fcd6c91678 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt @@ -15,6 +15,52 @@ # connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/+/1/#", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/+/1/#", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt index 8a13815211..c03d44bf10 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt @@ -18,6 +18,49 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/+/1/#", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/+/1/#", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt index 32c4bf265f..70eff1aca7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt @@ -18,6 +18,52 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + 
.build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt index b2725ffb3c..78e751b79a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt @@ -18,6 +18,49 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", 
"SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt index 7d5f7d7bfb..f33e1f91fe 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt @@ -15,6 +15,52 @@ # connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/+", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/+", 1) + .build()} 
+read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt index 997484b097..9fb6d03c6c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt @@ -18,6 +18,50 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/+", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/+", 1) + .build()} +write flush + + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt index a718e65d67..ed53019dbb 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt @@ -15,6 +15,52 @@ # connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/+/+", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/+/+", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt index 85e8ee4607..41af57518e 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt @@ -18,6 +18,49 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/+/+", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/+/+", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt index bccc5a676f..28fb765733 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt @@ -15,6 +15,54 @@ # connect 
"zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt index 07dc698f05..44d030c4a7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt @@ -18,6 +18,51 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + 
.clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt index 8a716db1b7..f319d2c650 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt @@ -15,6 +15,54 @@ # connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + 
.clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("device/#", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("device/#", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt index 8f18a8eb6d..c253604fbf 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt @@ -18,6 +18,51 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + 
.kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("device/#", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("device/#", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt index 91ab3c94bb..5255ad1a10 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt @@ -15,6 +15,54 @@ # connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/#", 1) + .subscription("device/#", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/#", 1) + .subscription("device/#", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + 
"zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt index 3e87f226e0..3628e544e0 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt @@ -18,6 +18,51 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/#", 1) + .subscription("device/#", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/#", 1) + .subscription("device/#", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt index 71f67b8c52..56864dabda 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt @@ -15,6 +15,71 @@ # connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 2) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 2) + .build()} +read notify RECEIVED_NEW_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" @@ -28,6 +93,7 @@ write zilla:begin.ext ${mqtt:beginEx() connected +write 
await RECEIVED_NEW_SESSION_STATE write advise zilla:flush ${mqtt:flushEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt index 0e4e1a761a..9c873daaea 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt @@ -18,6 +18,67 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 2) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 2) + 
.build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt index 127e186273..2b2003ecf1 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt @@ -15,6 +15,71 @@ # connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/#", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/#", 1) + .build()} +read notify RECEIVED_SESSION_STATE + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/#", 1) + .subscription("device/#", 2) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/#", 1) + .subscription("device/#", 2) + 
.build()} +read notify RECEIVED_NEW_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" @@ -28,6 +93,7 @@ write zilla:begin.ext ${mqtt:beginEx() connected +write await RECEIVED_NEW_SESSION_STATE write advise zilla:flush ${mqtt:flushEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt index d7a7bb2088..e966263783 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt @@ -18,6 +18,68 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/#", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/#", 1) + .build()} +write flush + +read zilla:data.ext ${mqtt:dataEx() + 
.typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/#", 1) + .subscription("device/#", 2) + .build()} + +write ${mqtt:session() + .subscription("sensor/#", 1) + .subscription("device/#", 2) + .build()} +write flush + + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt index f91aafaeb5..b11c1e4ee9 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt @@ -15,6 +15,71 @@ # connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE + +write zilla:data.ext 
${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("device/#", 2) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("device/#", 2) + .build()} +read notify RECEIVED_NEW_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" @@ -28,6 +93,7 @@ write zilla:begin.ext ${mqtt:beginEx() connected +write await RECEIVED_NEW_SESSION_STATE write advise zilla:flush ${mqtt:flushEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt index 1c11615786..c8b5ff2b20 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt @@ -18,6 +18,67 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read 
zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("device/#", 2) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("device/#", 2) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt new file mode 100644 index 0000000000..f04d809380 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt @@ -0,0 +1,77 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1, 0) + .subscription("sensor/two", 1, 135) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1) + .build() + .build()} + +connected diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt new file mode 100644 index 0000000000..0248734261 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt @@ -0,0 +1,78 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1, 135) + .build()} +write flush + + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1) + .build() + .build()} + +connected + + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt index 4cf5f87625..0840ca0c91 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt @@ -15,6 +15,54 @@ # connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/+/#", 1) + .subscription("sensor/+/1", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/+/#", 1) + .subscription("sensor/+/1", 1) + .build()} +read notify RECEIVED_SESSION_STATE + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt index 1661014959..5fe13e09ce 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt @@ -18,6 +18,51 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/+/#", 1) + .subscription("sensor/+/1", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/+/#", 1) + .subscription("sensor/+/1", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt index e434d60926..1eb10f6e72 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt @@ -18,6 +18,66 @@ connect "zilla://streams/app0" 
option zilla:window 8192 option zilla:transmission "duplex" +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +read notify RECEIVED_SESSION_STATE + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .build()} +write flush + +read ${mqtt:session() + .build()} + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt index c55c1fe474..cc53c1bc79 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt @@ -18,6 +18,63 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted 
+ +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .build()} +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .build()} + +write ${mqtt:session() + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt index a261a23c1f..088b00bdbd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt @@ -16,7 +16,69 @@ connect "zilla://streams/app0" option zilla:window 8192 - option zilla:transmission "duplex" + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + 
.clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +read notify RECEIVED_SESSION_STATE + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .build()} +write flush + +read ${mqtt:session() + .build()} +read notify RECEIVED_NEW_SESSION_STATE + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) @@ -29,6 +91,7 @@ write zilla:begin.ext ${mqtt:beginEx() connected +write await RECEIVED_NEW_SESSION_STATE write advise zilla:flush ${mqtt:flushEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt index 5e29cdd216..3eb936e39b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt @@ -18,6 +18,65 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .build()} + +write ${mqtt:session() + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt index 864949f7ea..14471265ea 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt @@ -15,6 +15,70 @@ # connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +read notify RECEIVED_SESSION_STATE + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/two", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/two", 1) + .build()} + + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt index 17a5f2539f..fe80f7bfae 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt @@ -18,6 +18,67 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/two", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/two", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt index b1d4973ec3..54eb445aa6 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt @@ -15,6 +15,70 @@ # connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +read notify RECEIVED_SESSION_STATE + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/two", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/two", 1) + .build()} +read notify RECEIVED_NEW_SESSION_STATE + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" @@ -29,6 +93,7 @@ write zilla:begin.ext ${mqtt:beginEx() connected +write await RECEIVED_NEW_SESSION_STATE write advise zilla:flush ${mqtt:flushEx() .typeId(zilla:id("mqtt")) .subscribe() diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt index 99b60b0f54..c545952949 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt @@ -18,6 +18,67 @@ accept "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/two", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/two", 1) + .build()} +write flush + + accepted read zilla:begin.ext ${mqtt:matchBeginEx() diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt new file mode 100644 index 0000000000..5f9f15fc2e --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt @@ -0,0 +1,101 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +read zilla:data.empty + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +read notify RECEIVED_SESSION_STATE + +write zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +write ${mqtt:session() + .build()} +write flush + +read ${mqtt:session() + .subscription("sensor/two", 1, 135) + .build()} +read notify RECEIVED_NEW_SESSION_STATE + +connect await RECEIVED_SESSION_STATE + "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1) + .filter("sensor/two", 1) + .build() + .build()} + +connected + +write await RECEIVED_NEW_SESSION_STATE +write advise zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/two", 1) + .build() + .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt new file mode 100644 index 0000000000..adcf408f35 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt @@ -0,0 +1,99 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .clientId("client") + .build() + .build()} + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .session() + .flags("CLEAN_START") + .qosMax(2) + .packetSizeMax(66560) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS") + .clientId("client") + .build() + .build()} + +connected + +write zilla:data.empty +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} + +write ${mqtt:session() + .subscription("sensor/one", 1) + .subscription("sensor/two", 1) + .build()} +write flush + +read zilla:data.ext ${mqtt:dataEx() + .typeId(zilla:id("mqtt")) + .session() + .kind("STATE") + .build() + .build()} + +read ${mqtt:session() + .build()} + +write ${mqtt:session() + .subscription("sensor/two", 1, 135) + .build()} +write flush + + +accepted + +read zilla:begin.ext ${mqtt:matchBeginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1) + .filter("sensor/two", 1) + .build() + .build()} + +connected + +read advised zilla:flush ${mqtt:flushEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .filter("sensor/two", 1) + .build() + .build()} diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt index 0a09934b3b..6885400b6d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -35,11 +36,4 @@ read [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -write [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained - write abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt index becde28c74..dd011faafc 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -36,11 +37,4 @@ write [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -read [0x82 0x12] # SUBSCRIBE - [0x00 0x01] 
# packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained - read aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt index 01bae13c04..2fbda6f599 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -35,11 +36,4 @@ read [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -write [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained - write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt index 9f3305413e..6cdf70836b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -36,11 +37,4 @@ write [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -read [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained - read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt index 21af97d26a..aff52eb2c4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -35,11 +36,4 @@ read [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -write [0x82 0x12] # SUBSCRIBE - [0x00 0x01] 
# packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained - read abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt index 11fe55d81a..ad5d2b332c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -36,11 +37,4 @@ write [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -read [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 - [0x02] # properties - [0x0b 0x01] # subscription id = 1 - [0x00 0x0a] "sensor/one" # topic filter - [0x00] # options = at-most-once, send retained - write aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/client.rpt new file mode 100644 index 0000000000..0272fd6bb4 --- /dev/null +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/client.rpt @@ -0,0 +1,46 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +read [0x20 0x23] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x20] # properties + [0x27] 50 # maximum packet size = 50 + [0x25 0x00] # retain unavailable + [0x2a 0x00] # shared subscription unavailable + [0x29 0x00] # subscription identifiers unavailable + [0x28 0x00] # wildcard subscription unavailable + [0x24 0x00] # maximum qos = at most once + [0x22] 0s # topic alias maximum = 0 + [0x13] 1s # keep alive = 1s + [0x12 0x00 0x08] "client-1" # assigned clientId + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/server.rpt new file 
mode 100644 index 0000000000..0a7638048e --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/server.rpt @@ -0,0 +1,47 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +write [0x20 0x23] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x20] # properties + [0x27] 50 # maximum packet size = 50 + [0x25 0x00] # retain unavailable + [0x2a 0x00] # shared subscription unavailable + [0x29 0x00] # subscription identifiers unavailable + [0x28 0x00] # wildcard subscription unavailable + [0x24 0x00] # maximum qos = at most once + [0x22] 0s # topic alias maximum = 0 + [0x13] 1s # keep alive = 1s + [0x12 0x00 0x08] "client-1" # assigned clientId + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/client.rpt new file mode 100644 index 0000000000..2c314341a5 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/client.rpt @@ -0,0 +1,37 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +read [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x84] # reason code = unsupported protocol + [0x00] # properties = none + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/server.rpt new file mode 100644 index 0000000000..cb13d52772 --- /dev/null +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/server.rpt @@ -0,0 +1,38 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +write [0x20 0x03] # CONNACK + [0x00] # flags = none + [0x84] # reason code = unsupported protocol + [0x00] # properties = none + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/client.rpt new file mode 100644 index 0000000000..173591b764 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/client.rpt @@ -0,0 +1,43 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code = success + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + +read notify RECEIVED_SESSION_STATE +read [0xe0 0x02] # DISCONNECT + [0x81] # malformed packet + [0x00] # properties = none + diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/server.rpt new file mode 100644 index 0000000000..9d836c1473 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/server.rpt @@ -0,0 +1,45 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code = success + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + +write await RECEIVED_SESSION_STATE +write [0xe0 0x02] # DISCONNECT + [0x81] # malformed packet + [0x00] # properties = none + +read abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/client.rpt index d39897ba84..26db218806 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/client.rpt @@ -21,7 +21,7 @@ connect 
"zilla://streams/net0" connected -write [0x10 0x23] # CONNECT +write [0x10 0x27] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x82] # flags = username, clean start diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/server.rpt index e23f5550e2..32c8cdfc7d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/server.rpt @@ -22,7 +22,7 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x23] # CONNECT +read [0x10 0x27] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x82] # flags = username, clean start diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt index e529445593..43e6312ee1 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt @@ -21,17 +21,17 @@ connect "zilla://streams/net0" connected -write [0x10 0x0d] # CONNECT - [0x00 0x04] "MQTT" # protocol name - [0x05] # protocol version - [0x02] # flags = clean start - [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x00] # client id +write [0x10 
0x0d] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x00] # client id -read [0x20 0x2f] # CONNACK - [0x00] # flags = none - [0x00] # reason code - [0x2c] # properties - [0x27] 66560 # maximum packet size = 66560 - [0x12 0x00 0x24] [0..36] # assigned clientId +read [0x20 0x11] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x0e] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x12 0x00 0x06] "client" # assigned clientId diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt index 14425c0795..aaa6880354 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt @@ -32,9 +32,9 @@ read [0x10 0x0d] # CONNECT [0x00] # properties = none [0x00 0x00] # client id -write [0x20 0x2f] # CONNACK +write [0x20 0x11] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x2c] # properties + [0x0e] # properties [0x27] 66560 # maximum packet size = 66560 - [0x12 0x00 0x24] ${assignedClientId} # assigned clientId + [0x12 0x00 0x06] "client" # assigned clientId diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt index 36e31e274d..61b54afa2f 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt @@ -36,12 +36,3 @@ read [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -write [0x30 0x39] # PUBLISH - [0x00 0x0a] "sensor/one" # topic name - [0x25] # properties - [0x02] 0x0f # expiry = 15 seconds - [0x03 0x00 0x07] "message" # content type - [0x01 0x01] # format = utf-8 - [0x08 0x00 0x0a] "sensor/one" # response topic - [0x09 0x00 0x04] "info" # correlation data - "message" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt index bfb44bde10..adaf18979d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt index 56422a43bd..f283dd141c 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/client.rpt new file mode 100644 index 0000000000..1c0ba53c63 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/client.rpt @@ -0,0 +1,41 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + +write [0xe0 0x00] # DISCONNECT + +write close +read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/server.rpt new file mode 100644 index 0000000000..6261500d74 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/server.rpt @@ -0,0 +1,42 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x13] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x00] # properties = none + [0x00 0x06] "client" # client id + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + +read [0xe0 0x00] # DISCONNECT + +read closed +write close diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt index 25b1f63b22..79d7bff1ea 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt @@ -40,3 +40,4 @@ write [0xe0 0x02] # DISCONNECT [0x00] # properties = none write close +read closed diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt index 1d7188a18c..a32c69a84d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt @@ -41,3 +41,4 @@ read [0xe0 0x02] # DISCONNECT [0x00] # properties = none read closed +write close diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/client.rpt new file mode 100644 index 0000000000..b54b872cda --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/client.rpt @@ -0,0 +1,42 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +read [0x20 0x0b] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x13] 1s # keep alive = 1s + +write [0xc0 0x00] # PINGREQ + +write abort diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/server.rpt new file mode 100644 index 0000000000..03208ff58b --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/server.rpt @@ -0,0 +1,43 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +write [0x20 0x0b] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x13] 1s # keep alive = 1s + +read [0xc0 0x00] # PINGREQ + +read aborted diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/client.rpt new file mode 100644 index 0000000000..8e0554766d --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/client.rpt @@ -0,0 +1,46 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +read [0x20 0x0b] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x13] 1s # keep alive = 1s + +write [0xc0 0x00] # PINGREQ + +read [0xd0 0x00] # PINGRESP + +write [0xc0 0x00] # PINGREQ + +read [0xd0 0x00] # PINGRESP diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/server.rpt new file mode 100644 index 0000000000..08899b5c28 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/server.rpt @@ -0,0 +1,47 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +write [0x20 0x0b] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x08] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x13] 1s # keep alive = 1s + +read [0xc0 0x00] # PINGREQ + +write [0xd0 0x00] # PINGRESP + +read [0xc0 0x00] # PINGREQ + +write [0xd0 0x00] # PINGRESP diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt index b2fdfa4427..e6fa483bfb 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt @@ -21,13 +21,14 @@ connect "zilla://streams/net0" connected -write [0x10 0x11] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x04] "abcd" # client id + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK [0x00] # flags = none diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt index 8c6e16f18e..97dd222d7f 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt @@ -22,13 +22,14 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x11] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x04] "abcd" # client id + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK [0x00] # flags = none diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt index 18ad3ac68d..a2414dec40 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -37,4 +38,4 @@ read [0x20 0x08] # CONNACK write [0x30 0x0d] # PUBLISH flags = at-most-once [0x00 0x0a] "sensor/one" # topic name - [0x00] # properties \ No newline at end of file + [0x00] # properties diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt index a47c03c9d6..794041b35f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt index 8e180c26df..d860770948 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK 
diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt index b621d3f44d..e2c51a72f6 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt index e8a61b496e..96c43ea885 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet 
size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt index 58cbb0a288..8d2e633257 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt index ff642d5d12..a694f7cdee 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + 
[0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt index 58cbb0a288..8d2e633257 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt index 9ab4ebe435..91f275b965 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # 
properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt index 9e2f2cbd0c..436e7daad3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt index 5101f466af..a300b9d0eb 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt @@ -36,7 +36,7 @@ read [0x20 0x0a] # CONNACK [0x27] 66560 # maximum packet size = 66560 [0x24 0x00] # maximum qos = at most once -write [0x32 0x39] # PUBLISH, qos = at least once +write [0x32 0x3b] # PUBLISH, qos = at least once [0x00 0x0a] "sensor/one" # topic name 
[0x00 0x01] # packet id = 1 [0x25] # properties diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt index 379e720cfe..f98707b20f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt @@ -37,7 +37,7 @@ write [0x20 0x0a] # CONNACK [0x27] 66560 # maximum packet size = 66560 [0x24 0x00] # maximum qos = at most once -read [0x32 0x39] # PUBLISH, qos = at least once +read [0x32 0x3b] # PUBLISH, qos = at least once [0x00 0x0a] "sensor/one" # topic name [0x00 0x01] # packet id = 1 [0x25] # properties diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt index cee75c68e9..99501dc869 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt @@ -36,7 +36,7 @@ read [0x20 0x0a] # CONNACK [0x27] 66560 # maximum packet size = 66560 [0x24 0x00] # maximum qos = at most once -write [0x34 0x39] # PUBLISH, qos = exactly once +write [0x34 0x3b] # PUBLISH, qos = exactly once [0x00 0x0a] "sensor/one" # topic name [0x00 0x01] # packet id = 1 [0x25] # properties diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt index 490588aa66..15c93e65ca 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt @@ -37,7 +37,7 @@ write [0x20 0x0a] # CONNACK [0x27] 66560 # maximum packet size = 66560 [0x24 0x00] # maximum qos = at most once -read [0x34 0x39] # PUBLISH, qos = exactly once +read [0x34 0x3b] # PUBLISH, qos = exactly once [0x00 0x0a] "sensor/one" # topic name [0x00 0x01] # packet id = 1 [0x25] # properties diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt index 1c2057ce46..634a3e994d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt index 10ee4d4df2..05be6f0398 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt index e15d7931c4..f3bd76a134 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x31] # CONNECT +write [0x10 0x36] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # 
client id read [0x20 0x08] # CONNACK @@ -35,7 +36,7 @@ read [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -write [0x31 0x4a] # PUBLISH +write [0x30 0x4a] # PUBLISH [0x00 0x0a] "/sensors/1" # topic name [0x14] # properties [0x26] # user property id diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt index 4470270545..124f3bcb3e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x31] # CONNECT +read [0x10 0x36] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id write [0x20 0x08] # CONNACK @@ -36,7 +37,7 @@ write [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -read [0x31 0x4a] # PUBLISH +read [0x30 0x4a] # PUBLISH [0x00 0x0a] "/sensors/1" # topic name [0x14] # properties [0x26] # user property id diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt index 7ee13a5f8b..5a6125be45 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x31] # CONNECT +write [0x10 0x36] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id read [0x20 0x08] # CONNACK @@ -35,7 +36,7 @@ read [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -write [0x31 0x4a] # PUBLISH +write [0x30 0x4a] # PUBLISH [0x00 0x0a] "/sensors/1" # topic name [0x14] # properties [0x26] # user property id diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt index 0186b1e4aa..4a7949c4b0 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x31] # CONNECT +read [0x10 0x36] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x24] 
"755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id write [0x20 0x08] # CONNACK @@ -36,7 +37,7 @@ write [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -read [0x31 0x4a] # PUBLISH +read [0x30 0x4a] # PUBLISH [0x00 0x0a] "/sensors/1" # topic name [0x14] # properties [0x26] # user property id diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt index 6e3ea77da4..ca65cc54cb 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x31] # CONNECT +write [0x10 0x36] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id read [0x20 0x08] # CONNACK @@ -35,7 +36,7 @@ read [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -write [0x31 0x3f] # PUBLISH +write [0x30 0x3f] # PUBLISH [0x00 0x0a] "/sensors/1" # topic name [0x09] # properties [0x26] # user property id diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt index e4f952dd8b..551f767e18 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x31] # CONNECT +read [0x10 0x36] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x24] "755452d5-e2ef-4113-b9c6-2f53de96fd76" # client id write [0x20 0x08] # CONNACK @@ -36,7 +37,7 @@ write [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -read [0x31 0x3f] # PUBLISH +read [0x30 0x3f] # PUBLISH [0x00 0x0a] "/sensors/1" # topic name [0x09] # properties [0x26] # user property id diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt index 5d745bb68a..9d1e963299 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id 
read [0x20 0x08] # CONNACK @@ -60,12 +61,13 @@ connect await FIRST_ABORTED connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x00] # flags = non clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt index 7aa9624f23..671fd9e422 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -55,12 +56,13 @@ write abort accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x00] # flags = non clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt index 35b8b9723b..ff89e2d340 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -64,12 +65,13 @@ connect await FIRST_CONNECTED connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x00] # flags = non clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt index e2325eeaf2..4b9121fce1 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] 
# protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -58,12 +59,13 @@ read closed accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x00] # flags = non clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt index 29b4a77410..d713df5127 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt @@ -33,8 +33,10 @@ write [0x10 0x3a] # CONNECT [0x00 0x09] "wills/one" # will topic write [0x00 0x1a] "client one session expired" # will payload -read [0x20 0x08] # CONNACK +read [0x20 0x0c] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x05] # properties = none + [0x09] # properties [0x27] 66560 # maximum packet size = 66560 + [0x24 0x00] # maximum qos = at most once + [0x2a 0x00] # shared subscription unavailable diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt
index d1f8667469..7bc048cf43 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt @@ -34,8 +34,10 @@ read [0x10 0x3a] # CONNECT [0x00 0x09] "wills/one" # will topic [0x00 0x1a] "client one session expired" # will payload -write [0x20 0x08] # CONNACK +write [0x20 0x0c] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x05] # properties = none + [0x09] # properties [0x27] 66560 # maximum packet size = 66560 + [0x24 0x00] # maximum qos = at most once + [0x2a 0x00] # shared subscription unavailable diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt index 9f051d9142..6027302d83 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x10] # CONNECT +write [0x10 0x15] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x00] # flags = none [0x00 0x3c] # keep alive = 60s - [0x05] # properties + [0x0a] # properties + [0x27] 66560 # maximum packet size = 66560 [0x11] 1 # session expiry interval [0x00 0x03] "one" # client id diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt index ad0d678c0c..077b01aa1f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x10] # CONNECT +read [0x10 0x15] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x00] # flags = none [0x00 0x3c] # keep alive = 60s - [0x05] # properties + [0x0a] # properties + [0x27] 66560 # maximum packet size = 66560 [0x11] 1 # session expiry interval [0x00 0x03] "one" # client id diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt index 9442e69a80..79ceb4828d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -65,12 +66,13 @@ connect await FIRST_CONNECTED connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 
0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt index 152272502b..779f119916 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -59,12 +60,13 @@ read closed accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt index 3124f2838a..5947d235ff 100644 
--- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt index f34e33da00..a81ad21637 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt index 429035ba02..f32db6815a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt index 04129949be..70e3bc9006 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt index f862e9b39b..3d57769f39 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -46,5 +47,3 @@ read [0x90 0x04] # SUBACK [0x00 0x01] # packet id = 1 [0x00] # properties = none [0x00] # reason code - -write notify SUBSCRIBED \ No newline at end of file diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt index 7c5aa38256..aa396a2aa4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt @@ -22,18 +22,19 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties 
+ [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x05] # properties = none + [0x05] # properties [0x27] 66560 # maximum packet size = 66560 read [0x82 0x12] # SUBSCRIBE diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt index 186b1e219e..95cb388d90 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3C] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt index cbc4de3ba7..5138218a62 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt @@ -22,12 +22,13 @@ accept 
"zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3C] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt index 01fca82896..562889475f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x3a] # CONNECT +write [0x10 0x3f] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x06] # flags = will flag, clean start [0x00 0x0a] # keep alive = 10s - [0x00] # properties + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x03] "one" # client id [0x02] # will properties [0x01 0x01] # format = utf-8 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt index 5996116b17..f3c7b6c9e9 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x3a] # CONNECT +read [0x10 0x3f] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x06] # flags = will flag, clean start [0x00 0x0a] # keep alive = 10s - [0x00] # properties + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x03] "one" # client id [0x02] # will properties [0x01 0x01] # format = utf-8 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt index e1600b0b51..35e3a75b84 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x3a] # CONNECT +write [0x10 0x3f] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x06] # flags = will flag, clean start [0x00 0x0a] # keep alive = 10s - [0x00] # properties + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x03] "one" # client id [0x02] # will properties [0x01 0x01] # format = utf-8 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt index 6a1b53ddf3..ed764ec0ef 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x3a] # CONNECT +read [0x10 0x3f] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x06] # flags = will flag, clean start [0x00 0x0a] # keep alive = 10s - [0x00] # properties + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x03] "one" # client id [0x02] # will properties [0x01 0x01] # format = utf-8 diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt index a3cb27a295..1dd62ebb20 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt @@ -21,20 +21,23 @@ connect "zilla://streams/net0" connected -write [0x10 0x3a] # CONNECT +write [0x10 0x3f] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x26] # flags = will retain, will flag, clean start - [0x00 0x0a] # keep alive = 10s - [0x00] # properties + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x03] "one" # client id [0x02] # will properties [0x01 0x01] # format 
= utf-8 [0x00 0x09] "wills/one" # will topic [0x00 0x1a] "client one session expired" # will payload -read [0x20 0x08] # CONNACK +read [0x20 0x0c] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x05] # properties = none + [0x09] # properties [0x27] 66560 # maximum packet size = 66560 + [0x24 0x00] # maximum qos = at most once + [0x2a 0x00] # shared subscription unavailable diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt index d1f8667469..b3acd9ab04 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt @@ -22,20 +22,23 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x3a] # CONNECT +read [0x10 0x3f] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x26] # flags = will retain, will flag, clean start - [0x00 0x0a] # keep alive = 10s - [0x00] # properties + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x03] "one" # client id [0x02] # will properties [0x01 0x01] # format = utf-8 [0x00 0x09] "wills/one" # will topic [0x00 0x1a] "client one session expired" # will payload -write [0x20 0x08] # CONNACK +write [0x20 0x0c] # CONNACK [0x00] # flags = none [0x00] # reason code - [0x05] # properties = none + [0x09] # properties [0x27] 66560 # maximum packet size = 66560 + [0x24 0x00] # maximum qos = at most once + [0x2a 0x00] # shared subscription unavailable diff --git
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt index e9bc4c9e79..277b69e0bd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt index 61944e33f0..b0ee5d909d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet 
size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt index f0e68093c7..23aa9e6031 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt index d7e28e9599..fdef560e9b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt @@ 
-22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt index 503cc35fe3..ecdeb9a31f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt index 8265375b49..c47f9ac934 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt index 299bbd396b..b337452ffd 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt index 
71a46760c9..2fc37a55f7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt index 4b1143221b..05c02a18c3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt 
index 2f5bb0a797..a51e094552 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -59,4 +60,4 @@ write [0x30 0x19] # PUBLISH [0x04] # properties [0x0b 0x01] # subscription id = 1 [0x01 0x01] # format = utf-8 - "message2" # payload \ No newline at end of file + "message2" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt index ba6e06343f..3f96479fd4 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt @@ -21,13 +21,14 @@ connect "zilla://streams/net0" connected -write [0x10 0x14] # CONNECT +write [0x10 0x1a] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x07] "client2" # client id + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x08] "client-1" # client 
id read [0x20 0x08] # CONNACK [0x00] # flags = none @@ -35,9 +36,10 @@ read [0x20 0x08] # CONNACK [0x05] # properties [0x27] 66560 # maximum packet size = 66560 -write [0x31 0x14] # PUBLISH flags = at-most-once, retain +write [0x31 0x16] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name - [0x00] # properties + [0x02] # properties + [0x01 0x01] # format = utf-8 "message" # payload write notify PUBLISHED @@ -50,13 +52,14 @@ connect await PUBLISHED connected -write [0x10 0x14] # CONNECT +write [0x10 0x1a] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x07] "client1" # client id + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x08] "client-2" # client id read [0x20 0x08] # CONNACK [0x00] # flags = none diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt index 436b990840..577e3c02eb 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt @@ -22,13 +22,14 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x14] # CONNECT +read [0x10 0x1a] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x07] "client2" # client id + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x08] "client-1" # client id write [0x20 0x08] # CONNACK 
[0x00] # flags = none @@ -36,9 +37,10 @@ write [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -read [0x31 0x14] # PUBLISH flags = at-most-once, retain +read [0x31 0x16] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name - [0x00] # properties + [0x02] # properties + [0x01 0x01] # format = utf-8 "message" # payload @@ -46,13 +48,14 @@ read [0x31 0x14] # PUBLISH flags = at-most-once, retain accepted connected -read [0x10 0x14] # CONNECT +read [0x10 0x1a] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none - [0x00 0x07] "client1" # client id + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x08] "client-2" # client id write [0x20 0x08] # CONNACK [0x00] # flags = none diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt index 1fbfe7f35c..409f0fe373 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x14] # CONNECT +write [0x10 0x19] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x07] "client1" # client id read [0x20 0x08] # CONNACK @@ -35,9 +36,10 @@ read [0x20 0x08] # CONNACK 
[0x05] # properties [0x27] 66560 # maximum packet size = 66560 -write [0x31 0x14] # PUBLISH flags = at-most-once, retain +write [0x31 0x16] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name - [0x00] # properties + [0x02] # properties + [0x01 0x01] # format = utf-8 "message" # payload write notify RETAINED_PUBLISHED @@ -51,12 +53,13 @@ connect await RETAINED_PUBLISHED connected -write [0x10 0x14] # CONNECT +write [0x10 0x19] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x07] "client2" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt index d67bc10f41..421ddb838e 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x14] # CONNECT +read [0x10 0x19] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x07] "client1" # client id write [0x20 0x08] # CONNACK @@ -36,20 +37,22 @@ write [0x20 0x08] # CONNACK [0x05] # properties = none [0x27] 66560 # maximum packet size = 66560 -read [0x31 0x14] # PUBLISH flags = at-most-once, retain 
+read [0x31 0x16] # PUBLISH flags = at-most-once, retain [0x00 0x0a] "sensor/one" # topic name - [0x00] # properties + [0x02] # properties + [0x01 0x01] # format = utf-8 "message" # payload accepted connected -read [0x10 0x14] # CONNECT +read [0x10 0x19] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x07] "client2" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt index 8a69f38f4a..0bca00b003 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -48,14 +49,14 @@ read [0x90 0x04] # SUBACK [0x00] # reason code write [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x0a] "sensor/+/1" # topic filter [0x20] # options = at-most-once read [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 
[0x00] # properties = none [0x00] # reason code diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt index e083bdc4d5..154bbbe696 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -49,14 +50,14 @@ write [0x90 0x04] # SUBACK [0x00] # reason code read [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x0a] "sensor/+/1" # topic filter [0x20] # options = at-most-once write [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00] # reason code diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt index 1c5fd3fec3..f4dc20f166 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt index 1c18976493..3ffcde5079 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt index f293f66014..783b486b3f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt index c7294e3326..5a565e96fb 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/client.rpt new file mode 100644 index 0000000000..b8c281be48 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/client.rpt @@ -0,0 +1,65 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + +write [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +read [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +read [0x30 0x1b] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x07] # properties + [0x23 0x00 0x01] # topic alias = 1 + [0x0b 0x01] # subscription id = 1 + [0x01 0x01] # format = utf-8 + "message" # payload + +read [0x30 0x12] # PUBLISH + [0x00 0x00] # topic name + [0x07] # properties + [0x23 0x00 0x01] # topic alias = 1 + [0x0b 0x01] # subscription id = 1 + [0x01 0x01] # format = utf-8 + "message2" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/server.rpt new file mode 100644 index 0000000000..4c53a33405 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/server.rpt @@ -0,0 +1,66 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 + +read [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +write [0x30 0x1b] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x07] # properties + [0x23 0x00 0x01] # topic alias = 1 + [0x0b 0x01] # subscription id = 1 + [0x01 0x01] # format = utf-8 + "message" # payload + +write [0x30 0x12] # PUBLISH + [0x00 0x00] # topic name + [0x07] # properties + [0x23 0x00 0x01] # topic alias = 1 + [0x0b 0x01] # subscription id = 1 + [0x01 0x01] # format = utf-8 + "message2" # payload diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/client.rpt new file mode 100644 index 0000000000..b337452ffd --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/client.rpt @@ -0,0 +1,56 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 + +write [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +read [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +read [0x30 0x18] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x04] # properties + [0x0b 0x01] # subscription id = 1 + [0x01 0x01] # format = utf-8 + "message" # payload \ No newline at end of file diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/server.rpt new file mode 100644 index 0000000000..afc507ccc7 --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/server.rpt @@ -0,0 +1,64 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 + +read [0x82 0x12] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +write [0x30 0x18] # PUBLISH + [0x00 0x0a] "sensor/one" # topic name + [0x04] # properties + [0x0b 0x01] # subscription id = 1 + [0x01 0x01] # format = utf-8 + "message" # payload + +write [0x30 0x18] # PUBLISH + [0x00 0x0a] "sensor/two" # topic name + [0x04] # properties + [0x0b 0x02] # subscription id = 2 + [0x01 0x01] # format = utf-8 + "message" # payload diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt index 564d33f3a5..453f097b77 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt index c17d971cc0..6d00c8560d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt index 92d5d93d68..227e0beb21 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt index 15e744e5f5..c597263fca 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt index 5c5f4b3eab..c99c84404f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt index 96650fc06a..6ddf6646ee 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # 
protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt index 434ce18b3c..3d57769f39 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt index 7c914c5b51..d02df55929 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 
0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt index a357131f7e..b485f291fb 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt index 8aceb59618..9c5b520f69 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt index 9ecd5c519f..d2233d81cf 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt index 
972062cbac..d34fd3f718 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt index e1d584fb47..2f9d51154f 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt index cfc2382136..71cdb546db 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt index ea62596109..2c383704f5 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start 
[0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt index 4de6727ff5..f08abd750d 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt index ecc300141d..c4ce6f4248 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt @@ -21,12 +21,13 @@ 
connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt index 435e7ef4eb..77fc3304c7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt index c45a415a66..d89bb05a76 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -48,13 +49,13 @@ read [0x90 0x04] # SUBACK [0x00] # reason code write [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x0a] "sensor/two" # topic filter [0x20] # options = at-most-once read [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00] # reason code diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt index 45b23519ab..77a0e0c46b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] 
"client" # client id write [0x20 0x08] # CONNACK @@ -49,13 +50,13 @@ write [0x90 0x04] # SUBACK [0x00] # reason code read [0x82 0x12] # SUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x0a] "sensor/two" # topic filter [0x20] # options = at-most-once write [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00] # reason codes diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt index 51b7af1649..465075bc5a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -48,13 +49,13 @@ read [0x90 0x04] # SUBACK [0x00] # reason code write [0x82 0x10] # SUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x08] "device/#" # topic filter [0x20] # options = at-most-once read [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00] # reason code diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt index 24855d9f90..2612d5d1e7 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -49,13 +50,13 @@ write [0x90 0x04] # SUBACK [0x00] # reason code read [0x82 0x10] # SUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x08] "device/#" # topic filter [0x20] # options = at-most-once write [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00] # reason code diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt index 532b39fab8..b58790c323 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -48,13 +49,13 @@ read [0x90 0x04] # SUBACK [0x00] # reason code write [0x82 0x10] # SUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x08] "device/#" # topic filter [0x20] # options = at-most-once read [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00] # reason code diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt index aeadc62da8..1021208d2a 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # 
protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -49,13 +50,13 @@ write [0x90 0x04] # SUBACK [0x00] # reason code read [0x82 0x10] # SUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x02] # properties [0x0b 0x02] # subscription id = 2 [0x00 0x08] "device/#" # topic filter [0x20] # options = at-most-once write [0x90 0x04] # SUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00] # reason code diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/client.rpt new file mode 100644 index 0000000000..5a3336f50e --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/client.rpt @@ -0,0 +1,52 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + +write [0x82 0x1f] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + + [0x00 0x0a] "sensor/two" # topic filter + [0x20] # options = at-most-once + +read [0x90 0x05] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00 0x87] # reason codes = Success, Not authorized diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/server.rpt new file mode 100644 index 0000000000..b64c8365cd --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/server.rpt @@ -0,0 +1,53 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License.
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + +read [0x82 0x1f] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + + [0x00 0x0a] "sensor/two" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x05] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00 0x87] # reason codes = Success, Not authorized diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt index 412623486b..b0a40a4e2b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt +++
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt index 13430ba6e8..f5c636679b 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt index 604012c7b4..23a116d772 100644 --- 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3C] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -48,11 +49,11 @@ read [0x90 0x04] # SUBACK [0x00] # reason code write [0xa2 0x0f] # UNSUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00 0x0a] "sensor/one" # topic filter read [0xb0 0x04] # UNSUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00] # unsubscribe = success diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt index 33dc6f6484..fafc1898d8 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3C] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 
66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -49,11 +50,11 @@ write [0x90 0x04] # SUBACK [0x00] # reason code read [0xa2 0x0f] # UNSUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00 0x0a] "sensor/one" # topic filter write [0xb0 0x04] # UNSUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00] # unsubscribe = success diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt index 98237eb68a..a5218b69f3 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -51,12 +52,12 @@ read [0x90 0x05] # SUBACK [0x00 0x00] # reason code write [0xa2 0x1b] # UNSUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00 0x0a] "sensor/one" # topic filter [0x00 0x0a] "sensor/two" # topic filter read [0xb0 0x05] # UNSUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00 0x00] # reason codes diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt index fa8c2a909b..5469d03e35 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -53,12 +54,12 @@ write [0x90 0x05] # SUBACK read [0xa2 0x1b] # UNSUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00 0x0a] "sensor/one" # topic filter [0x00 0x0a] "sensor/two" # topic filter write [0xb0 0x05] # UNSUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00 0x00] # reason codes diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt index 9389db8c48..501c328d13 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt +++ 
b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt @@ -21,12 +21,13 @@ connect "zilla://streams/net0" connected -write [0x10 0x13] # CONNECT +write [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id read [0x20 0x08] # CONNACK @@ -51,11 +52,11 @@ read [0x90 0x05] # SUBACK [0x00 0x00] # reason code write [0xa2 0x0f] # UNSUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00 0x0a] "sensor/one" # topic filter read [0xb0 0x04] # UNSUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00] # reason codes diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt index 5bb6ef2c12..8ad80ca804 100644 --- a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt @@ -22,12 +22,13 @@ accept "zilla://streams/net0" accepted connected -read [0x10 0x13] # CONNECT +read [0x10 0x18] # CONNECT [0x00 0x04] "MQTT" # protocol name [0x05] # protocol version [0x02] # flags = clean start [0x00 0x3c] # keep alive = 60s - [0x00] # properties = none + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 [0x00 0x06] "client" # client id write [0x20 0x08] # CONNACK @@ -53,11 +54,11 @@ write [0x90 0x05] # SUBACK read [0xa2 
0x0f] # UNSUBSCRIBE - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00 0x0a] "sensor/one" # topic filter write [0xb0 0x04] # UNSUBACK - [0x00 0x01] # packet id = 1 + [0x00 0x02] # packet id = 2 [0x00] # properties = none [0x00] # reason codes diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/client.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/client.rpt new file mode 100644 index 0000000000..fdbbb144bc --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/client.rpt @@ -0,0 +1,63 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + +write [0x82 0x1f] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + + [0x00 0x0a] "sensor/two" # topic filter + [0x20] # options = at-most-once + +read [0x90 0x05] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00 0x00] # reason code + +write [0xa2 0x1b] # UNSUBSCRIBE + [0x00 0x02] # packet id = 2 + [0x00] # properties = none + [0x00 0x0a] "sensor/one" # topic filter + [0x00 0x0a] "sensor/two" # topic filter + +read [0xb0 0x05] # UNSUBACK + [0x00 0x02] # packet id = 2 + [0x00] # properties = none + [0x00 0x87] # reason codes = Success, Not authorized diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/server.rpt b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/server.rpt new file mode 100644 index 0000000000..b8ddf0d53a --- /dev/null +++ b/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/server.rpt @@ -0,0 +1,65 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 + +read [0x82 0x1f] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x02] # properties + [0x0b 0x01] # subscription id = 1 + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + + [0x00 0x0a] "sensor/two" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x05] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00 0x00] # reason codes + + +read [0xa2 0x1b] # UNSUBSCRIBE + [0x00 0x02] # packet id = 2 + [0x00] # properties = none + [0x00 0x0a] "sensor/one" # topic filter + [0x00 0x0a] "sensor/two" # topic filter + +write [0xb0 0x05] # UNSUBACK + [0x00 0x02] # packet id = 2 + [0x00] # properties = none + [0x00 0x87] # reason codes = Success, Not authorized diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java 
b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java index 6cc86dcb42..2b2b12b726 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java @@ -64,6 +64,9 @@ public void shouldEncodeMqttSessionBeginExt() .session() .flags("WILL", "CLEAN_START") .expiry(30) + .qosMax(1) + .packetSizeMax(100) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS") .clientId("client") .build() .build(); @@ -74,6 +77,9 @@ public void shouldEncodeMqttSessionBeginExt() assertEquals(2, mqttBeginEx.kind()); assertEquals("client", mqttBeginEx.session().clientId().asString()); assertEquals(30, mqttBeginEx.session().expiry()); + assertEquals(1, mqttBeginEx.session().qosMax()); + assertEquals(100, mqttBeginEx.session().packetSizeMax()); + assertEquals(7, mqttBeginEx.session().capabilities()); assertEquals(6, mqttBeginEx.session().flags()); } @@ -284,6 +290,9 @@ public void shouldMatchSessionBeginExtension() throws Exception .session() .flags("CLEAN_START") .expiry(10) + .qosMax(1) + .packetSizeMax(100) + .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS") .clientId("client") .build() .build(); @@ -296,6 +305,9 @@ public void shouldMatchSessionBeginExtension() throws Exception .session(s -> s .flags(2) .expiry(10) + .qosMax(1) + .packetSizeMax(100) + .capabilities(7) .clientId("client")) .build(); @@ -1172,12 +1184,14 @@ public void shouldEncodeMqttResetEx() final byte[] array = MqttFunctions.resetEx() .typeId(0) .serverRef("mqtt-1.example.com:1883") + .reasonCode(0) .build(); DirectBuffer buffer = new UnsafeBuffer(array); MqttResetExFW mqttResetEx = new MqttResetExFW().wrap(buffer, 0, buffer.capacity()); assertEquals(0, mqttResetEx.typeId()); assertEquals("mqtt-1.example.com:1883", mqttResetEx.serverRef().asString()); + assertEquals(0, 
mqttResetEx.reasonCode()); } @Test @@ -1185,7 +1199,7 @@ public void shouldEncodeMqttSessionState() { final byte[] array = MqttFunctions.session() .subscription("sensor/one", 1, "AT_MOST_ONCE", "SEND_RETAINED") - .subscription("sensor/two") + .subscription("sensor/two", 1, 0) .build(); DirectBuffer buffer = new UnsafeBuffer(array); @@ -1201,7 +1215,9 @@ public void shouldEncodeMqttSessionState() assertNotNull(sessionState.subscriptions() .matchFirst(f -> "sensor/two".equals(f.pattern().asString()) && + 1 == f.subscriptionId() && 0 == f.qos() && + 0 == f.reasonCode() && 0b0000 == f.flags())); } @@ -1261,7 +1277,7 @@ public void shouldEncodeWillMessageBytesPayload() assertEquals("will.client", willMessage.topic().asString()); assertEquals(1, willMessage.flags()); assertEquals(0b0001, willMessage.flags()); - assertEquals("BINARY", willMessage.format().toString()); + assertEquals("NONE", willMessage.format().toString()); assertEquals("response_topic", willMessage.responseTopic().asString()); assertEquals("request-id-1", willMessage.correlation() .bytes().get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o))); diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java index 915c4b0e31..63a26568cb 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java @@ -45,15 +45,6 @@ public void shouldReceiveClientSentAbort() throws Exception k3po.finish(); } - @Test - @Specification({ - "${app}/client.sent.close/client", - "${app}/client.sent.close/server"}) - public void shouldReceiveClientSentClose() throws Exception - { - k3po.finish(); - } - @Test @Specification({ 
"${app}/connect.max.packet.size.exceeded/client", @@ -72,4 +63,57 @@ public void shouldDisconnectAfterSubscribeAndPublish() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/connect.non.successful.connack/client", + "${app}/connect.non.successful.connack/server"}) + public void shouldResetWithReasonCodeOnNonSuccessfulConnack() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/connect.non.successful.disconnect/client", + "${app}/connect.non.successful.disconnect/server"}) + public void shouldResetWithReasonCodeOnNonSuccessfulDisconnect() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/connect.delegate.connack.properties/client", + "${app}/connect.delegate.connack.properties/server"}) + public void shouldDelegateConnackProperties() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/connect.retain.not.supported/client", + "${app}/connect.retain.not.supported/server"}) + public void shouldConnectWithRetainNotSupported() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/connect.reject.will.retain.not.supported/client", + "${app}/connect.reject.will.retain.not.supported/server"}) + public void shouldRejectConnectWillRetainNotSupported() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/connect.maximum.qos.0/client", + "${app}/connect.maximum.qos.0/server"}) + public void shouldConnectWithMaximumQos0() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java index 61e8b7ce38..ce701e9387 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java +++ 
b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java @@ -37,6 +37,15 @@ public class SessionIT public final TestRule chain = outerRule(k3po).around(timeout); + @Test + @Specification({ + "${app}/session.connect/client", + "${app}/session.connect/server"}) + public void shouldConnect() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${app}/session.connect.with.session.expiry/client", diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java index d4e77f92a8..3ffc0a1d49 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java @@ -225,4 +225,30 @@ public void shouldNotReplayRetained() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/subscribe.receive.messages.topic.alias.repeated/client", + "${app}/subscribe.receive.messages.topic.alias.repeated/server"}) + public void shouldReceiveMessagesTopicAliasRepeated() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/subscribe.topic.filters.non.successful/client", + "${app}/subscribe.topic.filters.non.successful/server"}) + public void shouldFilterNonSuccessful() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${app}/subscribe.reconnect.publish.no.subscription/client", + "${app}/subscribe.reconnect.publish.no.subscription/server"}) + public void shouldReceiveReconnectNoSubscription() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/UnsubscribeIT.java 
b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/UnsubscribeIT.java index 3d8e13e535..d52937fc19 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/UnsubscribeIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/UnsubscribeIT.java @@ -72,4 +72,12 @@ public void shouldAcknowledgeAndPublishUnfragmented() throws Exception k3po.finish(); } + @Test + @Specification({ + "${app}/unsubscribe.topic.filters.non.successful/client", + "${app}/unsubscribe.topic.filters.non.successful/server"}) + public void shouldAcknowledgeNonSuccessful() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java index 31b78f8578..928fb50e36 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java @@ -436,4 +436,40 @@ public void shouldRejectInvalidSessionExpiryOnDisconnect() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${net}/connect.non.successful.connack/client", + "${net}/connect.non.successful.connack/server"}) + public void shouldResetWithReasonCodeOnNonSuccessfulConnack() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/connect.non.successful.disconnect/client", + "${net}/connect.non.successful.disconnect/server"}) + public void shouldResetWithReasonCodeOnNonSuccessfulDisconnect() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/connect.delegate.connack.properties/client", + 
"${net}/connect.delegate.connack.properties/server"}) + public void shouldDelegateConnackProperties() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/disconnect.no.reasoncode.no.properties/client", + "${net}/disconnect.no.reasoncode.no.properties/server"}) + public void shouldConnectThenDisconnectWithNoReasonCodeNoProperties() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PingIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PingIT.java index 5b35abb5ad..8409d47310 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PingIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PingIT.java @@ -45,6 +45,24 @@ public void shouldConnectThenPingRequestResponse() throws Exception k3po.finish(); } + @Test + @Specification({ + "${net}/ping.server.override.keep.alive/client", + "${net}/ping.server.override.keep.alive/server"}) + public void shouldPingServerOverridesKeepAlive() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/ping.no.pingresp/client", + "${net}/ping.no.pingresp/server"}) + public void shouldCloseWhenPingRequestNoResponseInTimeout() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${net}/ping.keep.alive/client", diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java index 675e20f3b2..db43f319ee 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java 
@@ -319,4 +319,31 @@ public void shouldNotReplayRetained() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${net}/subscribe.receive.messages.topic.alias.repeated/client", + "${net}/subscribe.receive.messages.topic.alias.repeated/server"}) + public void shouldReceiveMessagesTopicAliasRepeated() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/subscribe.topic.filters.non.successful/client", + "${net}/subscribe.topic.filters.non.successful/server"}) + public void shouldFilterNonSuccessful() throws Exception + { + k3po.finish(); + } + + @Test + @Specification({ + "${net}/subscribe.reconnect.publish.no.subscription/client", + "${net}/subscribe.reconnect.publish.no.subscription/server"}) + public void shouldReceiveReconnectNoSubscription() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/UnsubscribeIT.java b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/UnsubscribeIT.java index 865c0a07fc..3fc3f53936 100644 --- a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/UnsubscribeIT.java +++ b/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/UnsubscribeIT.java @@ -111,4 +111,13 @@ public void shouldAcknowledgeAndPublishUnfragmented() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${net}/unsubscribe.topic.filters.non.successful/client", + "${net}/unsubscribe.topic.filters.non.successful/server"}) + public void shouldAcknowledgeNonSuccessful() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java index eae9c10a13..dddd632e52 100644 --- 
a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java @@ -15,11 +15,13 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.internal; +import static io.aklivity.zilla.runtime.engine.config.KindConfig.CLIENT; import static io.aklivity.zilla.runtime.engine.config.KindConfig.SERVER; import java.util.EnumMap; import java.util.Map; +import io.aklivity.zilla.runtime.binding.mqtt.internal.stream.MqttClientFactory; import io.aklivity.zilla.runtime.binding.mqtt.internal.stream.MqttServerFactory; import io.aklivity.zilla.runtime.binding.mqtt.internal.stream.MqttStreamFactory; import io.aklivity.zilla.runtime.engine.EngineContext; @@ -38,7 +40,7 @@ final class MqttBindingContext implements BindingContext { final EnumMap factories = new EnumMap<>(KindConfig.class); factories.put(SERVER, new MqttServerFactory(config, context)); - //factories.put(CLIENT, new MqttClientFactory(config, context)); + factories.put(CLIENT, new MqttClientFactory(config, context)); this.factories = factories; } diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java index 1e4a84e2d2..a6f0fc1357 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java @@ -23,16 +23,16 @@ public class MqttConfiguration extends Configuration { private static final ConfigurationDef MQTT_CONFIG; public static final LongPropertyDef CONNECT_TIMEOUT; + public static final LongPropertyDef CONNACK_TIMEOUT; public static final LongPropertyDef PUBLISH_TIMEOUT; public static final ShortPropertyDef KEEP_ALIVE_MINIMUM; 
public static final ShortPropertyDef KEEP_ALIVE_MAXIMUM; public static final BytePropertyDef MAXIMUM_QOS; public static final BooleanPropertyDef RETAIN_AVAILABLE; public static final ShortPropertyDef TOPIC_ALIAS_MAXIMUM; - public static final BooleanPropertyDef WILDCARD_SUBSCRIPTION_AVAILABLE; - public static final BooleanPropertyDef SUBSCRIPTION_IDENTIFIERS_AVAILABLE; - public static final BooleanPropertyDef SHARED_SUBSCRIPTION_AVAILABLE; - public static final BooleanPropertyDef SESSIONS_AVAILABLE; + public static final BooleanPropertyDef WILDCARD_SUBSCRIPTION; + public static final BooleanPropertyDef SUBSCRIPTION_IDENTIFIERS; + public static final BooleanPropertyDef SHARED_SUBSCRIPTION; public static final BooleanPropertyDef NO_LOCAL; public static final IntPropertyDef SESSION_EXPIRY_GRACE_PERIOD; public static final PropertyDef CLIENT_ID; @@ -43,16 +43,16 @@ public class MqttConfiguration extends Configuration final ConfigurationDef config = new ConfigurationDef("zilla.binding.mqtt"); PUBLISH_TIMEOUT = config.property("publish.timeout", TimeUnit.SECONDS.toSeconds(30)); CONNECT_TIMEOUT = config.property("connect.timeout", TimeUnit.SECONDS.toSeconds(3)); + CONNACK_TIMEOUT = config.property("connack.timeout", TimeUnit.SECONDS.toSeconds(3)); //TODO: better default values? 
KEEP_ALIVE_MINIMUM = config.property("keep.alive.minimum", (short) 10); KEEP_ALIVE_MAXIMUM = config.property("keep.alive.maximum", (short) 1000); MAXIMUM_QOS = config.property("maximum.qos", (byte) 0); RETAIN_AVAILABLE = config.property("retain.available", true); TOPIC_ALIAS_MAXIMUM = config.property("topic.alias.maximum", (short) 0); - WILDCARD_SUBSCRIPTION_AVAILABLE = config.property("wildcard.subscription.available", true); - SUBSCRIPTION_IDENTIFIERS_AVAILABLE = config.property("subscription.identifiers.available", true); - SHARED_SUBSCRIPTION_AVAILABLE = config.property("shared.subscription.available", false); - SESSIONS_AVAILABLE = config.property("sessions.available", true); + WILDCARD_SUBSCRIPTION = config.property("wildcard.subscription.available", true); + SUBSCRIPTION_IDENTIFIERS = config.property("subscription.identifiers.available", true); + SHARED_SUBSCRIPTION = config.property("shared.subscription.available", false); NO_LOCAL = config.property("no.local", true); SESSION_EXPIRY_GRACE_PERIOD = config.property("session.expiry.grace.period", 30); CLIENT_ID = config.property("client.id"); @@ -76,6 +76,11 @@ public long connectTimeout() return CONNECT_TIMEOUT.get(this); } + public long connackTimeout() + { + return CONNACK_TIMEOUT.get(this); + } + public boolean retainAvailable() { return RETAIN_AVAILABLE.get(this); @@ -101,26 +106,6 @@ public short topicAliasMaximum() return TOPIC_ALIAS_MAXIMUM.get(this); } - public boolean wildcardSubscriptionAvailable() - { - return WILDCARD_SUBSCRIPTION_AVAILABLE.get(this); - } - - public boolean subscriptionIdentifierAvailable() - { - return SUBSCRIPTION_IDENTIFIERS_AVAILABLE.get(this); - } - - public boolean sharedSubscriptionAvailable() - { - return SHARED_SUBSCRIPTION_AVAILABLE.get(this); - } - - public boolean sessionsAvailable() - { - return SESSIONS_AVAILABLE.get(this); - } - public boolean noLocal() { return NO_LOCAL.get(this); diff --git 
a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java index 5ec4d0cf53..a85589a5b5 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java @@ -21,9 +21,9 @@ public final class MqttReasonCodes public static final byte NORMAL_DISCONNECT = 0x00; - public static final byte GRANTED_QOS_1 = 0x00; - public static final byte GRANTED_QOS_2 = 0x01; - public static final byte GRANTED_QOS_3 = 0x02; + public static final byte GRANTED_QOS_0 = 0x00; + public static final byte GRANTED_QOS_1 = 0x01; + public static final byte GRANTED_QOS_2 = 0x02; public static final byte DISCONNECT_WITH_WILL_MESSAGE = 0x04; diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java index a307aaab82..8f8021ea0f 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java @@ -15,6 +15,4122 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.internal.stream; -public class MqttClientFactory +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.BAD_AUTHENTICATION_METHOD; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.MALFORMED_PACKET; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.NORMAL_DISCONNECT; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.PACKET_TOO_LARGE; +import static 
io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.PAYLOAD_FORMAT_INVALID; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.PROTOCOL_ERROR; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.QOS_NOT_SUPPORTED; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.RETAIN_NOT_SUPPORTED; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.SUCCESS; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.TOPIC_ALIAS_INVALID; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttPublishFlags.RETAIN; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttSubscribeFlags.NO_LOCAL; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttSubscribeFlags.RETAIN_AS_PUBLISHED; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttSubscribeFlags.SEND_RETAINED; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_ASSIGNED_CLIENT_ID; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_AUTHENTICATION_DATA; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_AUTHENTICATION_METHOD; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_CONTENT_TYPE; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_CORRELATION_DATA; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_EXPIRY_INTERVAL; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_MAXIMUM_PACKET_SIZE; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_MAXIMUM_QO_S; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_PAYLOAD_FORMAT; 
+import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_RECEIVE_MAXIMUM; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_RESPONSE_TOPIC; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_RETAIN_AVAILABLE; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_SERVER_KEEP_ALIVE; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_SESSION_EXPIRY; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_SHARED_SUBSCRIPTION_AVAILABLE; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_SUBSCRIPTION_ID; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_SUBSCRIPTION_IDS_AVAILABLE; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_TOPIC_ALIAS; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_TOPIC_ALIAS_MAXIMUM; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_USER_PROPERTY; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW.KIND_WILDCARD_SUBSCRIPTION_AVAILABLE; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.DataFW.FIELD_OFFSET_PAYLOAD; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttPublishDataExFW.Builder.DEFAULT_EXPIRY_INTERVAL; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttPublishDataExFW.Builder.DEFAULT_FORMAT; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttServerCapabilities.SHARED_SUBSCRIPTIONS; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttServerCapabilities.SUBSCRIPTION_IDS; +import static 
io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttServerCapabilities.WILDCARD; +import static io.aklivity.zilla.runtime.engine.budget.BudgetCreditor.NO_CREDITOR_INDEX; +import static io.aklivity.zilla.runtime.engine.budget.BudgetDebitor.NO_DEBITOR_INDEX; +import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT; +import static io.aklivity.zilla.runtime.engine.concurrent.Signaler.NO_CANCEL_ID; +import static java.nio.ByteOrder.BIG_ENDIAN; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; + +import java.nio.ByteBuffer; +import java.nio.charset.CharacterCodingException; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.LongSupplier; +import java.util.function.LongUnaryOperator; +import java.util.stream.Collectors; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.collections.Int2ObjectHashMap; +import org.agrona.collections.Long2ObjectHashMap; +import org.agrona.collections.ObjectHashSet; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.mqtt.internal.MqttBinding; +import io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttBindingConfig; +import io.aklivity.zilla.runtime.binding.mqtt.internal.config.MqttRouteConfig; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Array32FW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttBinaryFW; +import 
io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttPayloadFormat; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttPayloadFormatFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttQoS; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttSessionFlags; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttSessionStateFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttTopicFilterFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttWillMessageFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.String16FW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Varuint32FW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttConnackFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttConnectFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttDisconnectFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPacketHeaderFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPacketType; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPingReqFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPingRespFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertiesFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPropertyFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttPublishFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttSubackFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttSubackPayloadFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttSubscribeFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttSubscribePayloadFW; +import 
io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttUnsubackFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttUnsubackPayloadFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttUnsubscribeFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttUnsubscribePayloadFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttUserPropertyFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.codec.MqttWillFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.ExtensionFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.FlushFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttBeginExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttFlushExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttPublishBeginExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttPublishDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttResetExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttSessionBeginExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttSessionDataExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttSubscribeBeginExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttSubscribeFlushExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.ResetFW; +import 
io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.SignalFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.budget.BudgetCreditor; +import io.aklivity.zilla.runtime.engine.budget.BudgetDebitor; +import io.aklivity.zilla.runtime.engine.buffer.BufferPool; +import io.aklivity.zilla.runtime.engine.concurrent.Signaler; +import io.aklivity.zilla.runtime.engine.config.BindingConfig; +import io.aklivity.zilla.specs.binding.mqtt.internal.types.stream.MqttServerCapabilities; + +public final class MqttClientFactory implements MqttStreamFactory { + private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0); + + private static final String16FW MQTT_PROTOCOL_NAME = new String16FW("MQTT", BIG_ENDIAN); + private static final int MQTT_PROTOCOL_VERSION = 5; + private static final int CONNACK_FIXED_HEADER = 0b0010_0000; + private static final int SUBACK_FIXED_HEADER = 0b1001_0000; + private static final int UNSUBACK_FIXED_HEADER = 0b1011_0000; + private static final int DISCONNECT_FIXED_HEADER = 0b1110_0000; + + private static final int NO_FLAGS = 0b0000_0000; + private static final int RETAIN_MASK = 0b0000_0001; + private static final int PUBLISH_QOS1_MASK = 0b0000_0010; + private static final int PUBLISH_QOS2_MASK = 0b0000_0100; + private static final int NO_LOCAL_FLAG_MASK = 0b0000_0100; + private static final int RETAIN_AS_PUBLISHED_MASK = 0b0000_1000; + private static final int RETAIN_HANDLING_MASK = 0b0011_0000; + + private static final int WILL_FLAG_MASK = 0b0000_0100; + private static final int WILL_QOS_MASK = 0b0001_1000; + private static final int WILL_RETAIN_MASK = 0b0010_0000; + + private static final int CONNACK_SESSION_PRESENT_MASK = 0b0000_0001; + 
private static final int CONNACK_RESERVED_FLAGS_MASK = 0b1111_1110; + + private static final int CONNACK_SESSION_EXPIRY_MASK = 0b0000_0000_0001; + private static final int CONNACK_MAXIMUM_QOS_MASK = 0b0000_0000_0010; + private static final int CONNACK_RETAIN_AVAILABLE_MASK = 0b0000_0000_0100; + private static final int CONNACK_MAXIMUM_PACKET_SIZE_MASK = 0b0000_0000_1000; + private static final int CONNACK_ASSIGNED_CLIENT_IDENTIFIER_MASK = 0b0000_0001_0000; + private static final int CONNACK_WILDCARD_SUBSCRIPTION_AVAILABLE_MASK = 0b0000_0010_0000; + private static final int CONNACK_SUBSCRIPTION_IDENTIFIERS_MASK = 0b0000_0100_0000; + private static final int CONNACK_SHARED_SUBSCRIPTION_AVAILABLE_MASK = 0b0000_1000_0000; + private static final int CONNACK_KEEP_ALIVE_MASK = 0b0001_0000_0000; + private static final int CONNACK_TOPIC_ALIAS_MAXIMUM_MASK = 0b0010_0000_0000; + + private static final int SHARED_SUBSCRIPTION_AVAILABLE_MASK = 1 << SHARED_SUBSCRIPTIONS.value(); + private static final int WILDCARD_AVAILABLE_MASK = 1 << WILDCARD.value(); + private static final int SUBSCRIPTION_IDS_AVAILABLE_MASK = 1 << SUBSCRIPTION_IDS.value(); + private static final int RETAIN_AVAILABLE_MASK = 1 << RETAIN.value(); + + private static final int RETAIN_FLAG = 1 << RETAIN.ordinal(); + private static final int SEND_RETAINED_FLAG = 1 << SEND_RETAINED.ordinal(); + private static final int RETAIN_AS_PUBLISHED_FLAG = 1 << RETAIN_AS_PUBLISHED.ordinal(); + private static final int NO_LOCAL_FLAG = 1 << NO_LOCAL.ordinal(); + private static final int DO_NOT_SEND_RETAINED_MASK = 0b0010_0000; + + private static final int PUBLISH_TYPE = 0x03; + + private static final int PUBLISH_EXPIRED_SIGNAL = 1; + private static final int KEEP_ALIVE_TIMEOUT_SIGNAL = 2; + private static final int CONNACK_TIMEOUT_SIGNAL = 3; + private static final int PINGRESP_TIMEOUT_SIGNAL = 4; + + private static final int PUBLISH_FRAMING = 255; + private static final int KEEP_ALIVE = 60000; + + private static final 
String16FW NULL_STRING = new String16FW((String) null); + public static final String SHARED_SUBSCRIPTION_LITERAL = "$share"; + + + private final BeginFW beginRO = new BeginFW(); + private final FlushFW flushRO = new FlushFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final AbortFW abortRO = new AbortFW(); + private final WindowFW windowRO = new WindowFW(); + private final ResetFW resetRO = new ResetFW(); + private final SignalFW signalRO = new SignalFW(); + + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final FlushFW.Builder flushRW = new FlushFW.Builder(); + + private final ExtensionFW extensionRO = new ExtensionFW(); + private final MqttDataExFW mqttPublishDataExRO = new MqttDataExFW(); + private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); + private final MqttDataExFW mqttDataExRO = new MqttDataExFW(); + private final MqttFlushExFW mqttFlushExRO = new MqttFlushExFW(); + + private final MqttBeginExFW.Builder mqttSessionBeginExRW = new MqttBeginExFW.Builder(); + private final MqttDataExFW.Builder mqttPublishDataExRW = new MqttDataExFW.Builder(); + private final MqttResetExFW.Builder mqttResetExRW = new MqttResetExFW.Builder(); + private final MqttWillFW.Builder willMessageRW = new MqttWillFW.Builder(); + private final MqttPacketHeaderFW mqttPacketHeaderRO = new MqttPacketHeaderFW(); + private final MqttConnackFW mqttConnackRO = new MqttConnackFW(); + private final MqttSubackFW mqttSubackRO = new MqttSubackFW(); + private final MqttUnsubackFW mqttUnsubackRO = new MqttUnsubackFW(); + private final MqttWillFW mqttWillRO = new MqttWillFW(); + private final 
MqttWillMessageFW mqttWillMessageRO = new MqttWillMessageFW(); + private final MqttPublishFW mqttPublishRO = new MqttPublishFW(); + private final MqttSubackPayloadFW mqttSubackPayloadRO = new MqttSubackPayloadFW(); + private final MqttUnsubackPayloadFW mqttUnsubackPayloadRO = new MqttUnsubackPayloadFW(); + private final MqttSubscribePayloadFW.Builder mqttSubscribePayloadRW = new MqttSubscribePayloadFW.Builder(); + private final MqttUnsubscribePayloadFW.Builder mqttUnsubscribePayloadRW = new MqttUnsubscribePayloadFW.Builder(); + private final MqttPingRespFW mqttPingRespRO = new MqttPingRespFW(); + private final MqttDisconnectFW mqttDisconnectRO = new MqttDisconnectFW(); + + private final OctetsFW octetsRO = new OctetsFW(); + private final OctetsFW.Builder octetsRW = new OctetsFW.Builder(); + + private final MqttPropertyFW mqttPropertyRO = new MqttPropertyFW(); + private final MqttPropertyFW.Builder mqttPropertyRW = new MqttPropertyFW.Builder(); + private final MqttPropertyFW.Builder mqttWillPropertyRW = new MqttPropertyFW.Builder(); + private final MqttSessionStateFW.Builder mqttSessionStateRW = new MqttSessionStateFW.Builder(); + + private final MqttSessionStateFW mqttSessionStateRO = new MqttSessionStateFW(); + + private final String16FW contentTypeRO = new String16FW(BIG_ENDIAN); + private final String16FW responseTopicRO = new String16FW(BIG_ENDIAN); + + private final MqttPublishHeader mqttPublishHeaderRO = new MqttPublishHeader(); + + private final MqttConnectFW.Builder mqttConnectRW = new MqttConnectFW.Builder(); + private final MqttSubscribeFW.Builder mqttSubscribeRW = new MqttSubscribeFW.Builder(); + private final MqttUnsubscribeFW.Builder mqttUnsubscribeRW = new MqttUnsubscribeFW.Builder(); + private final MqttPublishFW.Builder mqttPublishRW = new MqttPublishFW.Builder(); + private final MqttPingReqFW.Builder mqttPingReqRW = new MqttPingReqFW.Builder(); + private final MqttDisconnectFW.Builder mqttDisconnectRW = new MqttDisconnectFW.Builder(); + private 
final Array32FW.Builder userPropertiesRW = + new Array32FW.Builder<>(new MqttUserPropertyFW.Builder(), new MqttUserPropertyFW()); + private final Array32FW.Builder subscriptionIdsRW = + new Array32FW.Builder<>(new Varuint32FW.Builder(), new Varuint32FW()); + private final MqttClientDecoder decodeInitialType = this::decodeInitialType; + private final MqttClientDecoder decodePacketType = this::decodePacketType; + private final MqttClientDecoder decodeConnack = this::decodeConnack; + private final MqttClientDecoder decodeSuback = this::decodeSuback; + private final MqttClientDecoder decodeUnsuback = this::decodeUnsuback; + private final MqttClientDecoder decodePublish = this::decodePublish; + private final MqttClientDecoder decodePingresp = this::decodePingResp; + private final MqttClientDecoder decodeDisconnect = this::decodeDisconnect; + private final MqttClientDecoder decodeIgnoreAll = this::decodeIgnoreAll; + private final MqttClientDecoder decodeUnknownType = this::decodeUnknownType; + + private final Map decodersByPacketType; + private final Int2ObjectHashMap clients; + + private int maximumPacketSize; + + { + final Map decodersByPacketType = new EnumMap<>(MqttPacketType.class); + decodersByPacketType.put(MqttPacketType.CONNACK, decodeConnack); + decodersByPacketType.put(MqttPacketType.SUBACK, decodeSuback); + decodersByPacketType.put(MqttPacketType.UNSUBACK, decodeUnsuback); + decodersByPacketType.put(MqttPacketType.PUBLISH, decodePublish); + // decodersByPacketType.put(MqttPacketType.PUBREC, decodePubrec); + // decodersByPacketType.put(MqttPacketType.PUBREL, decodePubrel); + // decodersByPacketType.put(MqttPacketType.PUBCOMP, decodePubcomp); + decodersByPacketType.put(MqttPacketType.PINGRESP, decodePingresp); + decodersByPacketType.put(MqttPacketType.DISCONNECT, decodeDisconnect); + // decodersByPacketType.put(MqttPacketType.AUTH, decodeAuth); + this.decodersByPacketType = decodersByPacketType; + } + + private final MutableDirectBuffer writeBuffer; + private 
final MutableDirectBuffer extBuffer; + private final MutableDirectBuffer dataExtBuffer; + private final MutableDirectBuffer sessionStateBuffer; + private final MutableDirectBuffer payloadBuffer; + private final MutableDirectBuffer propertyBuffer; + private final MutableDirectBuffer userPropertiesBuffer; + private final MutableDirectBuffer subscriptionIdsBuffer; + private final MutableDirectBuffer willMessageBuffer; + private final MutableDirectBuffer willPropertyBuffer; + + private final ByteBuffer charsetBuffer; + private final BufferPool bufferPool; + private final BudgetCreditor creditor; + private final Signaler signaler; + private final MessageConsumer droppedHandler; + private final BindingHandler streamFactory; + private final LongUnaryOperator supplyInitialId; + private final LongUnaryOperator supplyReplyId; + private final LongSupplier supplyTraceId; + private final LongSupplier supplyBudgetId; + private final LongFunction supplyDebitor; + private final Long2ObjectHashMap bindings; + private final int mqttTypeId; + + private final long publishTimeoutMillis; + private final long connackTimeoutMillis; + private final int encodeBudgetMax; + + private final CharsetDecoder utf8Decoder; + + public MqttClientFactory( + MqttConfiguration config, + EngineContext context) + { + this.writeBuffer = context.writeBuffer(); + this.extBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); + this.dataExtBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); + this.sessionStateBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); + this.propertyBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); + this.userPropertiesBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); + this.subscriptionIdsBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); + this.payloadBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); + this.charsetBuffer = ByteBuffer.wrap(new byte[writeBuffer.capacity()]); + this.willMessageBuffer = new 
UnsafeBuffer(new byte[writeBuffer.capacity()]); + this.willPropertyBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); + this.bufferPool = context.bufferPool(); + this.creditor = context.creditor(); + this.signaler = context.signaler(); + this.droppedHandler = context.droppedFrameHandler(); + this.streamFactory = context.streamFactory(); + this.supplyDebitor = context::supplyDebitor; + this.supplyInitialId = context::supplyInitialId; + this.supplyReplyId = context::supplyReplyId; + this.supplyBudgetId = context::supplyBudgetId; + this.supplyTraceId = context::supplyTraceId; + this.bindings = new Long2ObjectHashMap<>(); + this.mqttTypeId = context.supplyTypeId(MqttBinding.NAME); + this.publishTimeoutMillis = SECONDS.toMillis(config.publishTimeout()); + this.connackTimeoutMillis = SECONDS.toMillis(config.connackTimeout()); + this.maximumPacketSize = writeBuffer.capacity(); + this.encodeBudgetMax = bufferPool.slotCapacity(); + this.utf8Decoder = StandardCharsets.UTF_8.newDecoder(); + this.clients = new Int2ObjectHashMap<>(); + } + + @Override + public void attach( + BindingConfig binding) + { + MqttBindingConfig mqttBinding = new MqttBindingConfig(binding); + bindings.put(binding.id, mqttBinding); + } + + @Override + public void detach( + long bindingId) + { + bindings.remove(bindingId); + } + + @Override + public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer sender) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long authorization = begin.authorization(); + + MqttBindingConfig binding = bindings.get(routedId); + + final MqttRouteConfig resolved = binding != null ? 
binding.resolve(authorization) : null; + + MessageConsumer newStream = null; + + if (resolved != null) + { + final long resolvedId = resolved.id; + + final OctetsFW extension = begin.extension(); + final ExtensionFW beginEx = extension.get(extensionRO::tryWrap); + assert beginEx != null; + final int typeId = beginEx.typeId(); + assert typeId == mqttTypeId; + + final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); + String16FW clientId; + MqttClient client; + switch (mqttBeginEx.kind()) + { + case MqttBeginExFW.KIND_SESSION: + clientId = mqttBeginEx.session().clientId(); + client = resolveClient(routedId, resolvedId, supplyInitialId.applyAsLong(resolvedId), clientId); + client.sessionStream = new MqttSessionStream(client, sender, originId, routedId, initialId); + newStream = client.sessionStream::onSession; + break; + case MqttBeginExFW.KIND_PUBLISH: + final MqttPublishBeginExFW publishBeginEx = mqttBeginEx.publish(); + clientId = publishBeginEx.clientId(); + client = resolveClient(routedId, resolvedId, supplyInitialId.applyAsLong(resolvedId), clientId); + MqttPublishStream publishStream = new MqttPublishStream(client, sender, originId, routedId, initialId); + newStream = publishStream::onPublish; + break; + case MqttBeginExFW.KIND_SUBSCRIBE: + final MqttSubscribeBeginExFW subscribeBeginEx = mqttBeginEx.subscribe(); + clientId = subscribeBeginEx.clientId(); + client = resolveClient(routedId, resolvedId, supplyInitialId.applyAsLong(resolvedId), clientId); + MqttSubscribeStream subscribeStream = new MqttSubscribeStream(client, sender, originId, routedId, initialId); + newStream = subscribeStream::onSubscribe; + break; + } + } + + return newStream; + } + + private MqttClient resolveClient( + long routedId, + long resolvedId, + long initialId, + String16FW clientId) + { + final int clientKey = clientKey(clientId.asString()); + return clients.computeIfAbsent(clientKey, + s -> new MqttClient(routedId, resolvedId, initialId, maximumPacketSize)); + } + + 
private int clientKey( + String client) + { + return Math.abs(client.hashCode()); + } + + private MessageConsumer newStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Flyweight extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Flyweight extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + DirectBuffer buffer, + int index, + int length, + Flyweight extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 
0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .payload(buffer, index, length) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Flyweight extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Flyweight extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doWindow( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + 
{ + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + receiver.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Flyweight extension) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + private void doFlush( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int reserved, + Consumer extension) + { + final FlushFW flush = flushRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .reserved(reserved) + .extension(extension) + .build(); + + receiver.accept(flush.typeId(), flush.buffer(), flush.offset(), flush.sizeof()); + } + + private int decodeInitialType( + MqttClient client, + final long traceId, + final long authorization, + final long budgetId, + final DirectBuffer buffer, + final int offset, + final int limit) + { + final MqttPacketHeaderFW 
packet = mqttPacketHeaderRO.tryWrap(buffer, offset, limit); + + decode: + if (packet != null) + { + final int length = packet.remainingLength(); + final MqttPacketType packetType = MqttPacketType.valueOf(packet.typeAndFlags() >> 4); + + if (packetType != MqttPacketType.CONNACK) + { + client.doNetworkEnd(traceId, authorization); + client.decoder = decodeIgnoreAll; + break decode; + } + + client.decodeablePacketBytes = packet.sizeof() + length; + client.decoder = decodePacketType; + } + + return offset; + } + + private int decodePacketType( + MqttClient server, + final long traceId, + final long authorization, + final long budgetId, + final DirectBuffer buffer, + final int offset, + final int limit) + { + final MqttPacketHeaderFW packet = mqttPacketHeaderRO.tryWrap(buffer, offset, limit); + + if (packet != null) + { + final int length = packet.remainingLength(); + final MqttPacketType packetType = MqttPacketType.valueOf(packet.typeAndFlags() >> 4); + final MqttClientDecoder decoder = decodersByPacketType.getOrDefault(packetType, decodeUnknownType); + + if (packet.sizeof() + length > maximumPacketSize) + { + server.onDecodeError(traceId, authorization, PACKET_TOO_LARGE); + server.decoder = decodeIgnoreAll; + } + else if (limit - packet.limit() >= length) + { + server.decodeablePacketBytes = packet.sizeof() + length; + server.decoder = decoder; + } + } + + return offset; + } + + private int decodeConnack( + MqttClient client, + final long traceId, + final long authorization, + final long budgetId, + final DirectBuffer buffer, + final int offset, + final int limit) + { + final int length = limit - offset; + + int progress = offset; + + if (length > 0) + { + int reasonCode = SUCCESS; + + final MqttConnackFW connack = mqttConnackRO.tryWrap(buffer, offset, limit); + int flags = 0; + decode: + { + if (connack == null) + { + reasonCode = PROTOCOL_ERROR; + break decode; + } + + else if ((connack.typeAndFlags() & 0b1111_1111) != CONNACK_FIXED_HEADER) + { + reasonCode = 
MALFORMED_PACKET; + break decode; + } + + flags = connack.flags(); + + reasonCode = decodeConnackFlags(flags); + if (reasonCode != SUCCESS) + { + break decode; + } + + progress = client.onDecodeConnack(traceId, authorization, buffer, progress, limit, connack); + client.decoder = decodePacketType; + } + + if (reasonCode != SUCCESS) + { + client.onDecodeError(traceId, authorization, reasonCode); + client.decoder = decodeIgnoreAll; + } + } + + return progress; + } + + private int decodeSuback( + MqttClient client, + final long traceId, + final long authorization, + final long budgetId, + final DirectBuffer buffer, + final int offset, + final int limit) + { + final int length = limit - offset; + + int progress = offset; + + if (length > 0) + { + int reasonCode = SUCCESS; + + final MqttSubackFW suback = mqttSubackRO.tryWrap(buffer, offset, limit); + decode: + { + if (suback == null) + { + reasonCode = PROTOCOL_ERROR; + break decode; + } + else if ((suback.typeAndFlags() & 0b1111_1111) != SUBACK_FIXED_HEADER) + { + reasonCode = MALFORMED_PACKET; + break decode; + } + + progress = client.onDecodeSuback(traceId, authorization, buffer, progress, limit, suback); + client.decoder = decodePacketType; + } + + if (reasonCode != SUCCESS) + { + client.onDecodeError(traceId, authorization, reasonCode); + client.decoder = decodeIgnoreAll; + } + } + + return progress; + } + + private int decodeUnsuback( + MqttClient client, + final long traceId, + final long authorization, + final long budgetId, + final DirectBuffer buffer, + final int offset, + final int limit) + { + final int length = limit - offset; + + int progress = offset; + + if (length > 0) + { + int reasonCode = SUCCESS; + + final MqttUnsubackFW unsuback = mqttUnsubackRO.tryWrap(buffer, offset, limit); + decode: + { + if (unsuback == null) + { + reasonCode = PROTOCOL_ERROR; + break decode; + } + else if ((unsuback.typeAndFlags() & 0b1111_1111) != UNSUBACK_FIXED_HEADER) + { + reasonCode = MALFORMED_PACKET; + break decode; + } 
            // Tail of the UNSUBACK decode dispatch: hand the packet to the client,
            // return to packet-type decoding, or enter ignore-all on any error.
            progress = client.onDecodeUnsuback(traceId, authorization, buffer, progress, limit, unsuback);
            client.decoder = decodePacketType;
        }

        if (reasonCode != SUCCESS)
        {
            client.onDecodeError(traceId, authorization, reasonCode);
            client.decoder = decodeIgnoreAll;
        }
    }

    return progress;
}

// Decodes an inbound PUBLISH once the full packet (decodeablePacketBytes) has arrived.
// Routes the message to the subscribe stream registered for the publish QoS; if no
// stream exists yet for the packet's subscription id, a session-state update adding an
// "admin" subscription is emitted first and decoding of this packet is retried later.
private int decodePublish(
    MqttClient client,
    final long traceId,
    final long authorization,
    final long budgetId,
    final DirectBuffer buffer,
    final int offset,
    final int limit)
{
    final int length = limit - offset;

    int progress = offset;

    decode:
    if (length >= client.decodeablePacketBytes)
    {
        int reasonCode = SUCCESS;
        final MqttPublishFW publish = mqttPublishRO.tryWrap(buffer, offset, offset + client.decodeablePacketBytes);

        // reset() clears and returns the shared mqttPublishHeaderRO instance,
        // so mqttPublishHeader and mqttPublishHeaderRO alias the same object below
        final MqttPublishHeader mqttPublishHeader = mqttPublishHeaderRO.reset();

        if (publish == null)
        {
            reasonCode = PROTOCOL_ERROR;
        }
        else
        {
            // decode() populates topic/qos/flags/properties and fills subscriptionIdsRW
            reasonCode = mqttPublishHeader.decode(client, publish.topicName(), publish.properties(), publish.typeAndFlags());
        }

        if (reasonCode == SUCCESS)
        {
            final int qos = mqttPublishHeader.qos;
            MqttSubscribeStream subscriber = client.subscribeStreams.get(qos);

            // assumes subscriptionIdsRW was populated during header decode; the first
            // id (or 0 when absent) selects the stream — TODO confirm against decode()
            final Varuint32FW firstSubscriptionId = subscriptionIdsRW.build().matchFirst(s -> true);
            final int subscriptionId = firstSubscriptionId != null ?
                firstSubscriptionId.value() : 0;

            if (!client.existStreamForId(subscriptionId))
            {
                // No stream for this subscription id yet: replay the existing session
                // state plus a synthetic subscription for this topic, then stop here
                // (break decode leaves progress unchanged so the packet is re-decoded).
                MqttSessionStateFW.Builder sessionStateBuilder =
                    mqttSessionStateRW.wrap(sessionStateBuffer, 0, sessionStateBuffer.capacity());
                client.sessionStream.subscriptions.forEach(s ->
                    sessionStateBuilder.subscriptionsItem(si ->
                        si.subscriptionId(s.id)
                            .qos(s.qos)
                            .flags(s.flags)
                            .pattern(s.filter)));
                final Subscription adminSubscription = new Subscription();
                adminSubscription.id = subscriptionId;
                adminSubscription.qos = mqttPublishHeader.qos;
                adminSubscription.filter = mqttPublishHeader.topic;
                client.sessionStream.subscriptions.add(adminSubscription);

                sessionStateBuilder.subscriptionsItem(si ->
                    si.subscriptionId(adminSubscription.id)
                        .qos(adminSubscription.qos)
                        .pattern(adminSubscription.filter));

                MqttSessionStateFW sessionState = sessionStateBuilder.build();
                client.sessionStream.doSessionData(traceId, authorization, sessionState.sizeof(), EMPTY_OCTETS, sessionState);

                break decode;
            }

            if (subscriber == null)
            {
                break decode;
            }

            final OctetsFW payload = publish.payload();
            final int payloadSize = payload.sizeof();

            if (mqttPublishHeaderRO.payloadFormat.equals(MqttPayloadFormat.TEXT) && invalidUtf8(payload))
            {
                // NOTE(review): after flagging PAYLOAD_FORMAT_INVALID this does not
                // break out of the decode block, so the flow-control/publish logic
                // below can still run for the invalid payload — confirm intended.
                reasonCode = PAYLOAD_FORMAT_INVALID;
                client.onDecodeError(traceId, authorization, reasonCode);
                client.decoder = decodeIgnoreAll;
            }

            boolean canPublish = MqttState.replyOpened(subscriber.state);

            // reserved = payload plus per-frame padding; only publish when the
            // subscriber's reply window can absorb it
            int reserved = payloadSize + subscriber.replyPad;
            canPublish &= subscriber.replySeq + reserved <= subscriber.replyAck + subscriber.replyMax;

            if (canPublish && subscriber.debitorIndex != NO_DEBITOR_INDEX && reserved != 0)
            {
                final int minimum = reserved; // TODO: fragmentation
                reserved = subscriber.debitor.claim(subscriber.debitorIndex, subscriber.replyId, minimum, reserved);
            }

            if (canPublish && (reserved != 0 || payloadSize == 0))
            {
                client.onDecodePublish(traceId, authorization, reserved,
                    payload, subscriber);
                client.decodeablePacketBytes = 0;
                client.decoder = decodePacketType;
                progress = publish.limit();
            }
        }
        else
        {
            client.onDecodeError(traceId, authorization, reasonCode);
            client.decoder = decodeIgnoreAll;
        }
    }

    return progress;
}

// Returns true when the payload is not valid UTF-8 (required for PUBLISH payloads
// declared with TEXT payload format).
private boolean invalidUtf8(
    OctetsFW payload)
{
    boolean invalid = false;
    byte[] payloadBytes = charsetBuffer.array();
    final int payloadSize = payload.sizeof();
    payload.value().getBytes(0, payloadBytes, 0, payloadSize);
    try
    {
        charsetBuffer.position(0).limit(payloadSize);
        // NOTE(review): utf8Decoder is presumably reset elsewhere between uses —
        // CharsetDecoder.decode on a reused decoder without reset() can throw
        // IllegalStateException; confirm the decoder's lifecycle.
        utf8Decoder.decode(charsetBuffer);
    }
    catch (CharacterCodingException ex)
    {
        invalid = true;
    }
    return invalid;
}

// Decodes a PINGRESP; cancels the ping-response timeout via onDecodePingResp.
private int decodePingResp(
    MqttClient client,
    final long traceId,
    final long authorization,
    final long budgetId,
    final DirectBuffer buffer,
    final int offset,
    final int limit)
{
    final int length = limit - offset;

    int progress = offset;

    if (length > 0)
    {
        final MqttPingRespFW ping = mqttPingRespRO.tryWrap(buffer, offset, limit);
        if (ping == null)
        {
            client.onDecodeError(traceId, authorization, PROTOCOL_ERROR);
            client.decoder = decodeIgnoreAll;
        }
        else
        {
            client.onDecodePingResp(traceId, authorization, ping);
            client.decoder = decodePacketType;
            progress = ping.limit();
        }
    }

    return progress;
}

// Decodes an inbound DISCONNECT packet and validates its fixed header.
private int decodeDisconnect(
    MqttClient server,
    final long traceId,
    final long authorization,
    final long budgetId,
    final DirectBuffer buffer,
    final int offset,
    final int limit)
{
    final int length = limit - offset;

    int progress = offset;

    if (length > 0)
    {
        int reasonCode = NORMAL_DISCONNECT;

        final MqttDisconnectFW disconnect = mqttDisconnectRO.tryWrap(buffer, offset, limit);
        if (disconnect == null)
        {
            reasonCode = PROTOCOL_ERROR;
        }
        else if ((disconnect.typeAndFlags() & 0b1111_1111) != DISCONNECT_FIXED_HEADER)
        {
            reasonCode = MALFORMED_PACKET;
        }

        // NOTE(review): compares against literal 0 rather than NORMAL_DISCONNECT —
        // correct only if NORMAL_DISCONNECT == 0 (MQTT 5 reason code 0x00); confirm.
        if (reasonCode == 0)
        {
            server.onDecodeDisconnect(traceId, authorization, disconnect);
            server.decoder = decodePacketType;
            progress = disconnect.limit();
        }
        else
        {
            server.onDecodeError(traceId, authorization, reasonCode);
            server.decoder = decodeIgnoreAll;
        }
    }

    return progress;
}

// Terminal decoder: consumes and discards all remaining bytes after a fatal error.
private int decodeIgnoreAll(
    MqttClient server,
    long traceId,
    long authorization,
    long budgetId,
    DirectBuffer buffer,
    int offset,
    int limit)
{
    return limit;
}

// Decoder for unrecognized packet types: reports a protocol error and ignores the rest.
private int decodeUnknownType(
    MqttClient server,
    long traceId,
    long authorization,
    long budgetId,
    DirectBuffer buffer,
    int offset,
    int limit)
{
    server.onDecodeError(traceId, authorization, PROTOCOL_ERROR);
    server.decoder = decodeIgnoreAll;
    return limit;
}

// Signature shared by all per-packet decoders; returns the new decode progress offset.
@FunctionalInterface
private interface MqttClientDecoder
{
    int decode(
        MqttClient client,
        long traceId,
        long authorization,
        long budgetId,
        DirectBuffer buffer,
        int offset,
        int limit);
}

// Per-connection MQTT client: owns the network stream, the decode/encode state
// machine, the session stream, and the publish/subscribe application streams.
private final class MqttClient
{
    private final AtomicInteger packetIdCounter;
    private final long originId;
    private final long routedId;
    private final long replyId;
    private final long initialId;
    private final ObjectHashSet publishStreams;
    private final Int2ObjectHashMap subscribeStreams;
    private final Int2ObjectHashMap topicAliases;
    private final long encodeBudgetId;

    private long budgetId;
    private int state;

    private long initialSeq;
    private long initialAck;
    private int initialMax;
    private int initialPad;

    private MessageConsumer network;
    private MqttSessionStream sessionStream;

    // sequence/ack/window for bytes received from the network (decode side)
    private long decodeSeq;
    private long decodeAck;
    private int decodeMax;

    // sequence/ack/window/padding for bytes sent to the network (encode side)
    private long encodeSeq;
    private long encodeAck;
    private int encodeMax;
    private int encodePad;

    private long encodeBudgetIndex = NO_CREDITOR_INDEX;
    private int encodeSharedBudget;

    // buffer-pool slot holding a partially received packet, if any
    private int decodeSlot = NO_SLOT;
    private int decodeSlotOffset;
    private int decodeSlotReserved;

    // buffer-pool slot holding bytes not yet writable to the network, if any
    private int encodeSlot =
        NO_SLOT;
    private int encodeSlotOffset;
    private long encodeSlotTraceId;

    private MqttClientDecoder decoder;
    private int decodeablePacketBytes;

    // timeout bookkeeping: CONNACK wait, PINGRESP wait, keep-alive PINGREQ schedule
    private long connackTimeoutId = NO_CANCEL_ID;
    private long pingRespTimeoutId = NO_CANCEL_ID;
    private long connackTimeoutAt;
    private long pingRespTimeoutAt;

    private long keepAliveTimeoutId = NO_CANCEL_ID;
    private long keepAliveTimeoutAt;

    private long keepAliveMillis = KEEP_ALIVE;
    // ping response must arrive within half the keep-alive interval
    private long pingRespTimeoutMillis = (long) (KEEP_ALIVE * 0.5);

    private boolean connectAcked;
    private short topicAliasMaximum = Short.MAX_VALUE;
    private int flags = 0;

    // optimistic defaults; CONNACK properties may clear individual capability bits
    private int capabilities = SHARED_SUBSCRIPTION_AVAILABLE_MASK | WILDCARD_AVAILABLE_MASK |
        SUBSCRIPTION_IDS_AVAILABLE_MASK | RETAIN_AVAILABLE_MASK;
    private int sessionExpiry = 0;
    private String clientId;
    private byte maximumQos = 2;
    private int maximumPacketSize;

    // bit set of CONNACK properties already seen, to reject duplicates
    private int decodablePropertyMask = 0;

    private MqttClient(
        long originId,
        long routedId,
        long initialId,
        int maximumPacketSize)
    {
        this.originId = originId;
        this.routedId = routedId;
        this.initialId = initialId;
        this.encodeBudgetId = supplyBudgetId.getAsLong();
        this.replyId = supplyReplyId.applyAsLong(initialId);
        this.decoder = decodeInitialType;
        this.publishStreams = new ObjectHashSet<>();
        this.subscribeStreams = new Int2ObjectHashMap<>();
        this.topicAliases = new Int2ObjectHashMap<>();
        this.maximumPacketSize = maximumPacketSize;
        this.packetIdCounter = new AtomicInteger();
    }

    // Dispatches network stream frames to the per-frame handlers below.
    private void onNetwork(
        int msgTypeId,
        DirectBuffer buffer,
        int index,
        int length)
    {
        switch (msgTypeId)
        {
        case BeginFW.TYPE_ID:
            final BeginFW begin = beginRO.wrap(buffer, index, index + length);
            onNetworkBegin(begin);
            break;
        case DataFW.TYPE_ID:
            final DataFW data = dataRO.wrap(buffer, index, index + length);
            onNetworkData(data);
            break;
        case EndFW.TYPE_ID:
            final EndFW end = endRO.wrap(buffer, index, index + length);
            onNetworkEnd(end);
            break;
        case AbortFW.TYPE_ID:
            final AbortFW abort = abortRO.wrap(buffer, index, index + length);
            onNetworkAbort(abort);
            break;
        case WindowFW.TYPE_ID:
            final WindowFW window = windowRO.wrap(buffer, index, index + length);
            onNetworkWindow(window);
            break;
        case ResetFW.TYPE_ID:
            final ResetFW reset = resetRO.wrap(buffer, index, index + length);
            onNetworkReset(reset);
            break;
        case SignalFW.TYPE_ID:
            final SignalFW signal = signalRO.wrap(buffer, index, index + length);
            onNetworkSignal(signal);
            break;
        default:
            break;
        }
    }

    // Network stream opened: acquire the shared encode budget and grant an
    // initial receive window sized to one buffer-pool slot.
    private void onNetworkBegin(
        BeginFW begin)
    {
        final long traceId = begin.traceId();
        final long authorization = begin.authorization();

        state = MqttState.openingInitial(state);

        assert encodeBudgetIndex == NO_CREDITOR_INDEX;
        this.encodeBudgetIndex = creditor.acquire(encodeBudgetId);

        doNetworkWindow(traceId, authorization, 0, 0L, 0, bufferPool.slotCapacity());
    }

    // Network bytes received: append to the decode slot if one is pending,
    // then run the current decoder over the accumulated bytes.
    private void onNetworkData(
        DataFW data)
    {
        final long sequence = data.sequence();
        final long acknowledge = data.acknowledge();
        final long traceId = data.traceId();
        final long authorization = data.authorization();

        assert acknowledge <= sequence;
        assert sequence >= decodeSeq;
        assert acknowledge <= decodeAck;

        decodeSeq = sequence + data.reserved();

        assert decodeAck <= decodeSeq;

        if (decodeSeq > decodeAck + decodeMax)
        {
            // sender exceeded the granted window: reset the network stream
            doNetworkReset(supplyTraceId.getAsLong(), authorization);
        }
        else
        {
            final long budgetId = data.budgetId();
            final OctetsFW payload = data.payload();

            DirectBuffer buffer = payload.buffer();
            int offset = payload.offset();
            int limit = payload.limit();
            int reserved = data.reserved();

            if (decodeSlot != NO_SLOT)
            {
                final MutableDirectBuffer slotBuffer = bufferPool.buffer(decodeSlot);
                slotBuffer.putBytes(decodeSlotOffset, buffer, offset, limit - offset);
                decodeSlotOffset += limit - offset;
                decodeSlotReserved += reserved;

                buffer =
                    slotBuffer;
                offset = 0;
                limit = decodeSlotOffset;
                reserved = decodeSlotReserved;
            }

            decodeNetwork(traceId, authorization, budgetId, reserved, buffer, offset, limit);
        }
    }

    // Clean end-of-stream from the network; only tears down when no partial
    // packet remains buffered in the decode slot.
    private void onNetworkEnd(
        EndFW end)
    {
        final long authorization = end.authorization();
        final long traceId = end.traceId();

        if (decodeSlot == NO_SLOT)
        {
            state = MqttState.closeReply(state);

            cleanupStreamsUsingAbort(traceId, authorization);

            doNetworkEnd(traceId, authorization);

            decoder = decodeIgnoreAll;
        }
    }

    // Abrupt network abort: discard buffered bytes and tear everything down.
    private void onNetworkAbort(
        AbortFW abort)
    {
        final long traceId = abort.traceId();
        final long authorization = abort.authorization();

        state = MqttState.closeInitial(state);

        cleanupDecodeSlot();

        cleanupNetwork(traceId, authorization);
    }

    // Window credit from the network: flush any buffered encode bytes, then
    // replenish the shared encode budget and the session stream's window.
    private void onNetworkWindow(
        WindowFW window)
    {
        final long sequence = window.sequence();
        final long acknowledge = window.acknowledge();
        final int maximum = window.maximum();
        final long traceId = window.traceId();
        final long authorization = window.authorization();
        final long budgetId = window.budgetId();
        final int padding = window.padding();

        state = MqttState.openReply(state);

        assert acknowledge <= sequence;
        assert sequence <= encodeSeq;
        assert acknowledge >= encodeAck;
        assert maximum >= encodeMax;

        encodeAck = acknowledge;
        encodeMax = maximum;
        encodePad = padding;

        assert encodeAck <= encodeSeq;

        if (encodeSlot != NO_SLOT)
        {
            final MutableDirectBuffer buffer = bufferPool.buffer(encodeSlot);
            final int offset = 0;
            final int limit = encodeSlotOffset;

            encodeNetwork(encodeSlotTraceId, authorization, budgetId, buffer, offset, limit);
        }

        // credit the shared budget with whatever window headroom is not already
        // buffered or previously credited
        final int encodeWin = encodeMax - (int) (encodeSeq - encodeAck);
        final int encodeSharedCredit = Math.min(encodeBudgetMax, encodeWin - encodeSlotOffset - encodeSharedBudget);

        if (encodeSharedCredit > 0)
        {
            final long encodeSharedBudgetPrevious = creditor.credit(traceId, encodeBudgetIndex,
                encodeSharedCredit);
            encodeSharedBudget += encodeSharedCredit;

            assert encodeSharedBudgetPrevious + encodeSharedCredit <= encodeBudgetMax
                : String.format("%d + %d <= %d, encodeBudget = %d",
                encodeSharedBudgetPrevious, encodeSharedCredit, encodeBudgetMax, encodeWin);

            assert encodeSharedCredit <= encodeBudgetMax
                : String.format("%d <= %d", encodeSharedCredit, encodeBudgetMax);
        }
        sessionStream.doSessionWindow(traceId, authorization, encodeSlotOffset, encodeBudgetMax);
    }

    private void onNetworkReset(
        ResetFW reset)
    {
        final long traceId = reset.traceId();
        final long authorization = reset.authorization();

        cleanupNetwork(traceId, authorization);
    }

    // Routes scheduled timer signals (keep-alive, PINGRESP, CONNACK timeouts).
    private void onNetworkSignal(
        SignalFW signal)
    {
        final int signalId = signal.signalId();

        switch (signalId)
        {
        case KEEP_ALIVE_TIMEOUT_SIGNAL:
            onKeepAliveTimeoutSignal(signal);
            break;
        case PINGRESP_TIMEOUT_SIGNAL:
            onPingRespTimeoutSignal(signal);
            break;
        case CONNACK_TIMEOUT_SIGNAL:
            onConnectTimeoutSignal(signal);
            break;
        default:
            break;
        }
    }

    // Keep-alive fired: send a PINGREQ and re-arm the keep-alive timer.
    private void onKeepAliveTimeoutSignal(
        SignalFW signal)
    {
        final long traceId = signal.traceId();
        final long authorization = signal.authorization();

        doEncodePingReq(traceId, authorization);
        keepAliveTimeoutId = NO_CANCEL_ID;
        doSignalKeepAliveTimeout();
    }

    // PINGRESP never arrived in time: treat the connection as dead.
    private void onPingRespTimeoutSignal(
        SignalFW signal)
    {
        final long traceId = signal.traceId();
        final long authorization = signal.authorization();

        cleanupNetwork(traceId, authorization);
    }

    // CONNACK never arrived before the deadline: abort all streams and stop decoding.
    private void onConnectTimeoutSignal(
        SignalFW signal)
    {
        final long traceId = signal.traceId();
        final long authorization = signal.authorization();

        final long now = System.currentTimeMillis();
        if (now >= connackTimeoutAt)
        {
            cleanupStreamsUsingAbort(traceId, authorization);
            doNetworkEnd(traceId, authorization);
            decoder = decodeIgnoreAll;
        }
    }

    // Cancels the pending CONNACK timeout, if armed.
    private void doCancelConnackTimeout()
    {
        if (connackTimeoutId !=
        NO_CANCEL_ID)
        {
            signaler.cancel(connackTimeoutId);
            connackTimeoutId = NO_CANCEL_ID;
        }
    }

    // Handles a CONNACK: validates properties, opens the application session
    // stream, and arms the keep-alive schedule.
    // NOTE(review): on the success path doSessionBegin is invoked TWICE — once
    // inside the labelled decode block (with expiry converted ms -> s) and once
    // unconditionally after it (with raw sessionExpiry). The second begin also
    // runs after cleanupNetwork on the property-error path. This looks like
    // merge residue from the retained-feature rebase; confirm which begin is
    // intended and remove the other.
    private int onDecodeConnack(
        long traceId,
        long authorization,
        DirectBuffer buffer,
        int progress,
        int limit,
        MqttConnackFW connack)
    {
        byte reasonCode;
        decode:
        {
            // a second CONNACK on the same connection is a protocol error
            if (connectAcked)
            {
                reasonCode = PROTOCOL_ERROR;
                break decode;
            }

            final MqttPropertiesFW properties = connack.properties();

            reasonCode = decodeConnackProperties(properties);

            if (reasonCode != SUCCESS || connack.reasonCode() != SUCCESS)
            {
                break decode;
            }

            Flyweight mqttBeginEx = mqttSessionBeginExRW.wrap(extBuffer, 0, extBuffer.capacity())
                .typeId(mqttTypeId)
                .session(sessionBuilder -> sessionBuilder
                    .flags(flags)
                    .expiry((int) TimeUnit.MILLISECONDS.toSeconds(sessionExpiry))
                    .qosMax(maximumQos)
                    .packetSizeMax(maximumPacketSize)
                    .capabilities(capabilities)
                    .clientId(clientId))
                .build();

            sessionStream.doSessionBegin(traceId, authorization, 0, mqttBeginEx);
            connectAcked = true;

            doCancelConnackTimeout();
            doSignalKeepAliveTimeout();
        }

        progress = connack.limit();
        if (reasonCode != SUCCESS)
        {
            doCancelConnackTimeout();
            cleanupNetwork(traceId, authorization);
            decoder = decodeIgnoreAll;
        }

        // NOTE(review): duplicate session begin — see method comment above.
        final Flyweight mqttBeginEx = mqttSessionBeginExRW.wrap(extBuffer, 0, extBuffer.capacity())
            .typeId(mqttTypeId)
            .session(sessionBuilder -> sessionBuilder
                .flags(flags)
                .expiry(sessionExpiry)
                .qosMax(maximumQos)
                .packetSizeMax(maximumPacketSize)
                .capabilities(capabilities)
                .clientId(clientId))
            .build();

        sessionStream.doSessionBegin(traceId, authorization, 0, mqttBeginEx);

        if (connack.reasonCode() != SUCCESS)
        {
            // server refused the connection: propagate the reason and tear down
            sessionStream.doSessionReset(traceId, authorization, connack.reasonCode());
            cleanupNetwork(traceId, authorization);
            decoder = decodeIgnoreAll;
        }

        return progress;
    }

    // Walks the CONNACK property list, recording each value and rejecting
    // duplicates via decodablePropertyMask; returns SUCCESS or an error code.
    private byte decodeConnackProperties(
        MqttPropertiesFW properties)
    {
        byte reasonCode = SUCCESS;

        final
        OctetsFW propertiesValue = properties.value();
        final DirectBuffer decodeBuffer = propertiesValue.buffer();
        final int decodeOffset = propertiesValue.offset();
        final int decodeLimit = propertiesValue.limit();

        decode:
        for (int decodeProgress = decodeOffset; decodeProgress < decodeLimit; )
        {
            final MqttPropertyFW mqttProperty = mqttPropertyRO.wrap(decodeBuffer, decodeProgress, decodeLimit);
            switch (mqttProperty.kind())
            {
            case KIND_SESSION_EXPIRY:
                if (isSetSessionExpiryInterval(decodablePropertyMask))
                {
                    sessionExpiry = 0;
                    reasonCode = PROTOCOL_ERROR;
                    break decode;
                }
                this.decodablePropertyMask |= CONNACK_SESSION_EXPIRY_MASK;
                this.sessionExpiry = (int) mqttProperty.sessionExpiry();
                break;
            case KIND_TOPIC_ALIAS_MAXIMUM:
                if (isSetTopicAliasMaximum(decodablePropertyMask))
                {
                    topicAliasMaximum = 0;
                    reasonCode = PROTOCOL_ERROR;
                    break decode;
                }
                this.decodablePropertyMask |= CONNACK_TOPIC_ALIAS_MAXIMUM_MASK;
                this.topicAliasMaximum = (short) (mqttProperty.topicAliasMaximum() & 0xFFFF);
                break;
            case KIND_MAXIMUM_QO_S:
                if (isSetMaximumQos(decodablePropertyMask))
                {
                    maximumQos = 0;
                    reasonCode = PROTOCOL_ERROR;
                    break decode;
                }
                this.decodablePropertyMask |= CONNACK_MAXIMUM_QOS_MASK;
                this.maximumQos = (byte) mqttProperty.maximumQoS();
                break;
            // NOTE(review): KIND_RECEIVE_MAXIMUM falls through and is decoded as
            // maximumPacketSize() — if the fall-through is intentional (ignore
            // receive maximum) it still misreads the property bytes; confirm.
            case KIND_RECEIVE_MAXIMUM:
            case KIND_MAXIMUM_PACKET_SIZE:
                final int maxConnackPacketSize = (int) mqttProperty.maximumPacketSize();
                if (maxConnackPacketSize == 0 || isSetMaximumPacketSize(decodablePropertyMask))
                {
                    reasonCode = PROTOCOL_ERROR;
                    break decode;
                }
                this.decodablePropertyMask |= CONNACK_MAXIMUM_PACKET_SIZE_MASK;
                if (maxConnackPacketSize < maximumPacketSize)
                {
                    this.maximumPacketSize = maxConnackPacketSize;
                }
                break;
            case KIND_RETAIN_AVAILABLE:
                if (isSetRetainAvailable(decodablePropertyMask))
                {
                    reasonCode = PROTOCOL_ERROR;
                    break decode;
                }
                this.decodablePropertyMask |= CONNACK_RETAIN_AVAILABLE_MASK;
                if
                (mqttProperty.retainAvailable() == 0)
                {
                    this.capabilities &= ~(1 << MqttServerCapabilities.RETAIN.value());
                }
                break;
            case KIND_ASSIGNED_CLIENT_ID:
                if (isSetAssignedClientId(decodablePropertyMask))
                {
                    reasonCode = PROTOCOL_ERROR;
                    break decode;
                }
                this.decodablePropertyMask |= CONNACK_ASSIGNED_CLIENT_IDENTIFIER_MASK;
                clientId = mqttProperty.assignedClientId().asString();
                break;
            case KIND_WILDCARD_SUBSCRIPTION_AVAILABLE:
                if (isSetWildcardSubscriptions(decodablePropertyMask))
                {
                    reasonCode = PROTOCOL_ERROR;
                    break decode;
                }
                this.decodablePropertyMask |= CONNACK_WILDCARD_SUBSCRIPTION_AVAILABLE_MASK;
                if (mqttProperty.wildcardSubscriptionAvailable() == 0)
                {
                    this.capabilities &= ~(1 << MqttServerCapabilities.WILDCARD.value());
                }
                break;
            case KIND_SUBSCRIPTION_IDS_AVAILABLE:
                if (isSetSubscriptionIdentifiers(decodablePropertyMask))
                {
                    reasonCode = PROTOCOL_ERROR;
                    break decode;
                }
                this.decodablePropertyMask |= CONNACK_SUBSCRIPTION_IDENTIFIERS_MASK;
                if (mqttProperty.subscriptionIdsAvailable() == 0)
                {
                    this.capabilities &= ~(1 << MqttServerCapabilities.SUBSCRIPTION_IDS.value());
                }
                break;
            case KIND_SHARED_SUBSCRIPTION_AVAILABLE:
                if (isSetSharedSubscriptions(decodablePropertyMask))
                {
                    reasonCode = PROTOCOL_ERROR;
                    break decode;
                }
                this.decodablePropertyMask |= CONNACK_SHARED_SUBSCRIPTION_AVAILABLE_MASK;
                if (mqttProperty.sharedSubscriptionAvailable() == 0)
                {
                    this.capabilities &= ~(1 << MqttServerCapabilities.SHARED_SUBSCRIPTIONS.value());
                }
                break;
            case KIND_SERVER_KEEP_ALIVE:
                if (isSetServerKeepAlive(decodablePropertyMask))
                {
                    reasonCode = PROTOCOL_ERROR;
                    break decode;
                }
                // NOTE(review): sets CONNACK_SHARED_SUBSCRIPTION_AVAILABLE_MASK —
                // almost certainly a copy-paste error; a server-keep-alive mask
                // should be set here, otherwise a later shared-subscription
                // property is falsely rejected as a duplicate. Confirm and fix.
                this.decodablePropertyMask |= CONNACK_SHARED_SUBSCRIPTION_AVAILABLE_MASK;
                keepAliveMillis = SECONDS.toMillis(mqttProperty.serverKeepAlive());
                pingRespTimeoutMillis = (long) (keepAliveMillis * 0.5);
                break;
            case KIND_USER_PROPERTY:
                // TODO
                break;
            case KIND_AUTHENTICATION_METHOD:
                reasonCode =
                    BAD_AUTHENTICATION_METHOD;
                break decode;
            case KIND_AUTHENTICATION_DATA:
                // TODO
                break;
            default:
                reasonCode = MALFORMED_PACKET;
                break decode;
            }

            decodeProgress = mqttProperty.limit();
        }

        return reasonCode;
    }

    // A PINGRESP arrived: the connection is alive, cancel the pending timeout.
    private void onDecodePingResp(
        long traceId,
        long authorization,
        MqttPingRespFW ping)
    {
        doCancelPingRespTimeout();
    }

    // Handles a SUBACK: rebuilds the session state from acknowledged plus
    // still-pending subscriptions and forwards it on the session stream.
    // NOTE(review): unAckedSubscriptionsByPacketId.remove(packetId) returns null
    // for an unknown/duplicate packetId, which would NPE in the filter below —
    // confirm a guard is not needed.
    private int onDecodeSuback(
        long traceId,
        long authorization,
        DirectBuffer buffer,
        int progress,
        int limit,
        MqttSubackFW suback)
    {
        final int packetId = suback.packetId();
        final OctetsFW decodePayload = suback.payload();

        final DirectBuffer decodeBuffer = decodePayload.buffer();
        final int decodeOffset = decodePayload.offset();
        final int decodeLimit = decodePayload.limit();

        final List unackedSubscriptions = sessionStream.unAckedSubscriptionsByPacketId.remove(packetId);
        MqttSessionStateFW.Builder sessionStateBuilder =
            mqttSessionStateRW.wrap(sessionStateBuffer, 0, sessionStateBuffer.capacity());

        // first replay the subscriptions that were not awaiting this SUBACK
        sessionStream.subscriptions.stream().filter(s -> !unackedSubscriptions.contains(s)).forEach(s ->
            sessionStateBuilder.subscriptionsItem(si ->
                si.subscriptionId(s.id)
                    .qos(s.qos)
                    .flags(s.flags)
                    .pattern(s.filter)));

        // then append each newly acknowledged subscription with its reason code,
        // pairing SUBACK payload entries positionally with the pending list
        int i = 0;
        for (int decodeProgress = decodeOffset; decodeProgress < decodeLimit; )
        {
            final MqttSubackPayloadFW subackPayload =
                mqttSubackPayloadRO.tryWrap(decodeBuffer, decodeProgress, decodeLimit);
            if (subackPayload == null)
            {
                break;
            }
            decodeProgress = subackPayload.limit();

            final Subscription subscription = unackedSubscriptions.get(i++);
            sessionStateBuilder.subscriptionsItem(si ->
                si.subscriptionId(subscription.id)
                    .qos(subscription.qos)
                    .flags(subscription.flags)
                    .reasonCode(subackPayload.reasonCode())
                    .pattern(subscription.filter));
        }
        final MqttSessionStateFW sessionState = sessionStateBuilder.build();
        sessionStream.doSessionData(traceId, authorization, sessionState.sizeof(), EMPTY_OCTETS, sessionState);
        progress = suback.limit();
        return progress;
    }

    // Handles an UNSUBACK: drops the unsubscribed filters from the session
    // state, re-adding (with reason code) any the server failed to remove.
    // NOTE(review): as in onDecodeSuback, a null result from
    // unAckedSubscriptionsByPacketId.remove(packetId) would NPE here.
    private int onDecodeUnsuback(
        long traceId,
        long authorization,
        DirectBuffer buffer,
        int progress,
        int limit,
        MqttUnsubackFW unsuback)
    {
        final int packetId = unsuback.packetId();
        final OctetsFW decodePayload = unsuback.payload();

        final DirectBuffer decodeBuffer = decodePayload.buffer();
        final int decodeOffset = decodePayload.offset();
        final int decodeLimit = decodePayload.limit();

        final List unackedSubscriptions = sessionStream.unAckedSubscriptionsByPacketId.remove(packetId);
        sessionStream.subscriptions.removeAll(unackedSubscriptions);

        MqttSessionStateFW.Builder sessionStateBuilder =
            mqttSessionStateRW.wrap(sessionStateBuffer, 0, sessionStateBuffer.capacity());
        sessionStream.subscriptions.forEach(s ->
            sessionStateBuilder.subscriptionsItem(si ->
                si.subscriptionId(s.id)
                    .qos(s.qos)
                    .flags(s.flags)
                    .pattern(s.filter)));
        int i = 0;
        for (int decodeProgress = decodeOffset; decodeProgress < decodeLimit; )
        {
            final MqttUnsubackPayloadFW unsubackPayload =
                mqttUnsubackPayloadRO.tryWrap(decodeBuffer, decodeProgress, decodeLimit);
            if (unsubackPayload == null)
            {
                break;
            }
            decodeProgress = unsubackPayload.limit();

            final int reasonCode = unsubackPayload.reasonCode();
            if (reasonCode != SUCCESS)
            {
                // unsubscribe failed for this filter: keep it in the state with
                // the server's reason code attached
                final Subscription subscription = unackedSubscriptions.get(i);
                sessionStateBuilder.subscriptionsItem(si ->
                    si.subscriptionId(subscription.id)
                        .qos(subscription.qos)
                        .flags(subscription.flags)
                        .reasonCode(reasonCode)
                        .pattern(subscription.filter));
            }
            i++;
        }
        final MqttSessionStateFW sessionState = sessionStateBuilder.build();
        sessionStream.doSessionData(traceId, authorization, sessionState.sizeof(), EMPTY_OCTETS, sessionState);

        progress = unsuback.limit();

        return progress;
    }

    // Forwards a decoded PUBLISH to the matched subscribe stream, carrying the
    // header metadata (topic, qos, flags, properties) in the data extension.
    private void onDecodePublish(
        long traceId,
        long authorization,
        int reserved,
        OctetsFW payload,
        MqttSubscribeStream stream)
    {
        final
        MqttDataExFW.Builder builder = mqttPublishDataExRW.wrap(dataExtBuffer, 0, dataExtBuffer.capacity())
            .typeId(mqttTypeId)
            .subscribe(s ->
            {
                s.topic(mqttPublishHeaderRO.topic);
                s.qos(mqttPublishHeaderRO.qos);
                s.flags(mqttPublishHeaderRO.flags);
                s.subscriptionIds(subscriptionIdsRW.build());
                s.expiryInterval(mqttPublishHeaderRO.expiryInterval);
                s.contentType(mqttPublishHeaderRO.contentType);
                s.format(f -> f.set(mqttPublishHeaderRO.payloadFormat));
                s.responseTopic(mqttPublishHeaderRO.responseTopic);
                s.correlation(c -> c.bytes(mqttPublishHeaderRO.correlationData));
                final Array32FW userProperties = userPropertiesRW.build();
                userProperties.forEach(c -> s.propertiesItem(p -> p.key(c.key()).value(c.value())));
            });

        final MqttDataExFW dataEx = builder.build();
        if (stream != null)
        {
            stream.doSubscribeData(traceId, authorization, reserved, payload, dataEx);
        }
    }

    // Handles a server-sent DISCONNECT: validates properties, propagates a
    // non-success reason to the session stream, then closes all streams.
    private void onDecodeDisconnect(
        long traceId,
        long authorization,
        MqttDisconnectFW disconnect)
    {
        byte reasonCode = decodeDisconnectProperties(disconnect.properties());

        if (reasonCode != SUCCESS)
        {
            onDecodeError(traceId, authorization, reasonCode);
            decoder = decodeIgnoreAll;
        }
        else
        {
            if (disconnect.reasonCode() != SUCCESS)
            {
                sessionStream.doSessionReset(traceId, authorization, disconnect.reasonCode());
            }
        }

        state = MqttState.closingInitial(state);
        closeStreams(traceId, authorization);
        doNetworkEnd(traceId, authorization);
    }

    // Validates DISCONNECT properties; a session-expiry property here is a
    // protocol error (the interval may not be introduced at disconnect time).
    private byte decodeDisconnectProperties(
        MqttPropertiesFW properties)
    {
        byte reasonCode = SUCCESS;

        final OctetsFW propertiesValue = properties.value();
        final DirectBuffer decodeBuffer = propertiesValue.buffer();
        final int decodeOffset = propertiesValue.offset();
        final int decodeLimit = propertiesValue.limit();

        decode:
        for (int decodeProgress = decodeOffset; decodeProgress < decodeLimit; )
        {
            final MqttPropertyFW mqttProperty = mqttPropertyRO.wrap(decodeBuffer, decodeProgress,
                decodeLimit);
            switch (mqttProperty.kind())
            {
            case KIND_SESSION_EXPIRY:
                reasonCode = PROTOCOL_ERROR;
                break decode;
            default:
                break;
            }

            decodeProgress = mqttProperty.limit();
        }

        return reasonCode;
    }

    // Common fatal-decode-error path: abort application streams and end the network.
    private void onDecodeError(
        long traceId,
        long authorization,
        int reasonCode)
    {
        cleanupStreamsUsingAbort(traceId, authorization);
        doNetworkEnd(traceId, authorization);
    }

    // Opens the network stream toward the MQTT server (idempotent).
    private void doNetworkBegin(
        long traceId,
        long authorization,
        long affinity)
    {
        if (!MqttState.initialOpening(state))
        {
            state = MqttState.openingInitial(state);

            network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck,
                initialMax, traceId, authorization, affinity, EMPTY_OCTETS);
        }
    }

    private void doNetworkData(
        long traceId,
        long authorization,
        long budgetId,
        Flyweight flyweight)
    {
        doNetworkData(traceId, authorization, budgetId, flyweight.buffer(), flyweight.offset(), flyweight.limit());
    }

    // Sends encoded bytes to the network, buffering into the encode slot when a
    // previous write is still pending so ordering is preserved.
    private void doNetworkData(
        long traceId,
        long authorization,
        long budgetId,
        DirectBuffer buffer,
        int offset,
        int limit)
    {
        if (encodeSlot != NO_SLOT)
        {
            final MutableDirectBuffer encodeBuffer = bufferPool.buffer(encodeSlot);
            encodeBuffer.putBytes(encodeSlotOffset, buffer, offset, limit - offset);
            encodeSlotOffset += limit - offset;
            encodeSlotTraceId = traceId;

            buffer = encodeBuffer;
            offset = 0;
            limit = encodeSlotOffset;
        }

        encodeNetwork(traceId, authorization, budgetId, buffer, offset, limit);
    }

    // Ends the outgoing network stream and releases encode-side resources.
    private void doNetworkEnd(
        long traceId,
        long authorization)
    {
        if (!MqttState.initialClosed(state))
        {
            state = MqttState.closeInitial(state);

            cleanupBudgetCreditor();
            cleanupEncodeSlot();

            doEnd(network, originId, routedId, initialId, encodeSeq, encodeAck, encodeMax,
                traceId, authorization, EMPTY_OCTETS);
        }
    }

    // Aborts the outgoing network stream.
    // NOTE(review): the guard tests replyClosed but the ABORT frame targets
    // initialId with initial-side state transition — confirm the guard is not
    // meant to be initialClosed.
    private void doNetworkAbort(
        long traceId,
        long authorization)
    {
        if (!MqttState.replyClosed(state))
        {
            state =
            MqttState.closeReply(state);

            cleanupBudgetCreditor();
            cleanupEncodeSlot();

            doAbort(network, originId, routedId, initialId, encodeSeq, encodeAck, encodeMax,
                traceId, authorization, EMPTY_OCTETS);
        }
    }

    // Resets the network stream (receive side).
    // NOTE(review): the RESET frame references initialId while carrying the
    // decode (reply-side) sequence counters — confirm replyId is not intended.
    private void doNetworkReset(
        long traceId,
        long authorization)
    {
        if (!MqttState.initialClosed(state))
        {
            state = MqttState.closeInitial(state);

            cleanupDecodeSlot();

            doReset(network, originId, routedId, initialId, decodeSeq, decodeAck, decodeMax,
                traceId, authorization, EMPTY_OCTETS);
        }
    }

    private void doNetworkWindow(
        long traceId,
        long authorization,
        int padding,
        long budgetId)
    {
        doNetworkWindow(traceId, authorization, padding, budgetId, decodeSlotReserved, decodeMax);
    }

    // Grants (or re-grants) receive window to the network when acknowledgement
    // progressed, the window grew, or the stream is not yet marked open.
    private void doNetworkWindow(
        long traceId,
        long authorization,
        int padding,
        long budgetId,
        int minReplyNoAck,
        int minReplyMax)
    {
        final long newReplyAck = Math.max(decodeSeq - minReplyNoAck, decodeAck);

        if (newReplyAck > decodeAck || minReplyMax > decodeMax || !MqttState.initialOpened(state))
        {
            decodeAck = newReplyAck;
            assert decodeAck <= decodeSeq;

            decodeMax = minReplyMax;

            state = MqttState.openInitial(state);

            doWindow(network, originId, routedId, replyId, decodeSeq, decodeAck, decodeMax,
                traceId, authorization, budgetId, padding);
        }
    }

    // Encodes an application publish into an MQTT PUBLISH packet. The 0x02 bit
    // of the stream DATA flags marks the first fragment carrying the extension
    // (assumption based on usage here — confirm against the data-frame flag
    // constants); subsequent fragments are raw payload continuation.
    private void doEncodePublish(
        long traceId,
        long authorization,
        int flags,
        OctetsFW payload,
        OctetsFW extension,
        String topic)
    {
        if ((flags & 0x02) != 0)
        {
            final MqttDataExFW mqttDataEx = extension.get(mqttPublishDataExRO::tryWrap);
            final int payloadSize = payload.sizeof();

            // NOTE(review): asserts against MqttBeginExFW.KIND_PUBLISH on a data
            // extension — valid only if the begin/data kind constants coincide.
            assert mqttDataEx.kind() == MqttBeginExFW.KIND_PUBLISH;
            final MqttPublishDataExFW mqttPublishDataEx = mqttDataEx.publish();

            final int deferred = mqttPublishDataEx.deferred();
            final int expiryInterval = mqttPublishDataEx.expiryInterval();
            final String16FW contentType = mqttPublishDataEx.contentType();
            final String16FW responseTopic =
                mqttPublishDataEx.responseTopic();
            final MqttBinaryFW correlation = mqttPublishDataEx.correlation();
            final MqttPayloadFormatFW format = mqttPublishDataEx.format();
            final Array32FW properties =
                mqttPublishDataEx.properties();

            final int topicLength = topic.length();

            // running total of encoded property bytes; each wrap starts at the
            // current total so limits accumulate
            AtomicInteger propertiesSize = new AtomicInteger();

            final int publishFlags = mqttPublishDataEx.flags();
            final int qos = mqttPublishDataEx.qos();

            final int publishNetworkTypeAndFlags = PUBLISH_TYPE << 4 |
                calculatePublishNetworkFlags(PUBLISH_TYPE << 4 | publishFlags, qos);

            if (expiryInterval != -1)
            {
                mqttPropertyRW.wrap(propertyBuffer, propertiesSize.get(), propertyBuffer.capacity())
                    .expiryInterval(expiryInterval)
                    .build();
                propertiesSize.set(mqttPropertyRW.limit());
            }

            if (contentType.value() != null)
            {
                mqttPropertyRW.wrap(propertyBuffer, propertiesSize.get(), propertyBuffer.capacity())
                    .contentType(contentType.asString())
                    .build();
                propertiesSize.set(mqttPropertyRW.limit());
            }

            if (!format.get().equals(MqttPayloadFormat.NONE))
            {
                mqttPropertyRW.wrap(propertyBuffer, propertiesSize.get(), propertyBuffer.capacity())
                    .payloadFormat((byte) mqttPublishDataEx.format().get().ordinal())
                    .build();
                propertiesSize.set(mqttPropertyRW.limit());
            }

            if (responseTopic.value() != null)
            {
                mqttPropertyRW.wrap(propertyBuffer, propertiesSize.get(), propertyBuffer.capacity())
                    .responseTopic(responseTopic.asString())
                    .build();
                propertiesSize.set(mqttPropertyRW.limit());
            }

            if (correlation.length() != -1)
            {
                mqttPropertyRW.wrap(propertyBuffer, propertiesSize.get(), propertyBuffer.capacity())
                    .correlationData(a -> a.bytes(correlation.bytes()))
                    .build();
                propertiesSize.set(mqttPropertyRW.limit());
            }

            properties.forEach(p ->
            {
                mqttPropertyRW.wrap(propertyBuffer, propertiesSize.get(), propertyBuffer.capacity())
                    .userProperty(c -> c.key(p.key()).value(p.value()))
                    .build();
                propertiesSize.set(mqttPropertyRW.limit());
            });

            final int propertiesSize0 = propertiesSize.get();
            // remainingLength: 2 (topic length) + topic + 1 (property length,
            // assumes < 128 so a single varint byte) + properties + payload,
            // plus deferred bytes that follow in later DATA fragments
            final MqttPublishFW publish =
                mqttPublishRW.wrap(writeBuffer, DataFW.FIELD_OFFSET_PAYLOAD, writeBuffer.capacity())
                    .typeAndFlags(publishNetworkTypeAndFlags)
                    .remainingLength(3 + topicLength + propertiesSize.get() + payloadSize + deferred)
                    .topicName(topic)
                    .properties(p -> p.length(propertiesSize0)
                        .value(propertyBuffer, 0, propertiesSize0))
                    .payload(payload)
                    .build();

            doNetworkData(traceId, authorization, 0L, publish);
        }
        else
        {
            // continuation fragment: payload bytes only, no MQTT header
            doNetworkData(traceId, authorization, 0L, payload);
        }
    }

    // Maps application publish flags + qos onto MQTT PUBLISH fixed-header flags.
    private int calculatePublishNetworkFlags(int applicationTypeAndFlags, int qos)
    {
        int flags = 0;

        if ((applicationTypeAndFlags & RETAIN_FLAG) != 0)
        {
            flags |= RETAIN_MASK;
        }

        if (qos == MqttQoS.AT_LEAST_ONCE.value())
        {
            flags |= PUBLISH_QOS1_MASK;
        }
        else if (qos == MqttQoS.EXACTLY_ONCE.value())
        {
            flags |= PUBLISH_QOS2_MASK;
        }
        return flags;
    }

    // Encodes and sends a CONNECT packet, optionally followed by a will message.
    private void doEncodeConnect(
        long traceId,
        long authorization,
        String clientId,
        int flags,
        int sessionExpiry,
        MqttWillMessageFW willMessage)
    {
        int propertiesSize = 0;

        MqttPropertyFW mqttProperty;

        //TODO: remove this once we support large messages
        mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity())
            .maximumPacketSize(maximumPacketSize)
            .build();
        propertiesSize = mqttProperty.limit();

        if (sessionExpiry != 0)
        {
            mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity())
                .sessionExpiry(sessionExpiry)
                .build();
            propertiesSize = mqttProperty.limit();
        }
        MqttWillFW will = null;
        if (willMessage != null)
        {
            final int expiryInterval = willMessage.expiryInterval();
            final String16FW contentType = willMessage.contentType();
            final String16FW responseTopic = willMessage.responseTopic();
            final MqttBinaryFW correlation = willMessage.correlation();
            final MqttPayloadFormatFW format =
                willMessage.format();
            final Array32FW properties =
                willMessage.properties();

            // cumulative size of encoded will properties (same accumulation
            // pattern as PUBLISH property encoding)
            AtomicInteger willPropertiesSize = new AtomicInteger();
            if (expiryInterval != -1)
            {
                mqttWillPropertyRW.wrap(willPropertyBuffer, willPropertiesSize.get(), willPropertyBuffer.capacity())
                    .expiryInterval(expiryInterval)
                    .build();
                willPropertiesSize.set(mqttWillPropertyRW.limit());
            }

            if (contentType.value() != null)
            {
                mqttWillPropertyRW.wrap(willPropertyBuffer, willPropertiesSize.get(), willPropertyBuffer.capacity())
                    .contentType(contentType.asString())
                    .build();
                willPropertiesSize.set(mqttWillPropertyRW.limit());
            }

            if (!format.get().equals(MqttPayloadFormat.NONE))
            {
                mqttWillPropertyRW.wrap(willPropertyBuffer, willPropertiesSize.get(), willPropertyBuffer.capacity())
                    .payloadFormat((byte) format.get().ordinal())
                    .build();
                willPropertiesSize.set(mqttWillPropertyRW.limit());
            }

            if (responseTopic.value() != null)
            {
                mqttWillPropertyRW.wrap(willPropertyBuffer, willPropertiesSize.get(), willPropertyBuffer.capacity())
                    .responseTopic(responseTopic.asString())
                    .build();
                willPropertiesSize.set(mqttWillPropertyRW.limit());
            }

            if (correlation.length() != -1)
            {
                mqttWillPropertyRW.wrap(willPropertyBuffer, willPropertiesSize.get(), willPropertyBuffer.capacity())
                    .correlationData(a -> a.bytes(correlation.bytes()))
                    .build();
                willPropertiesSize.set(mqttWillPropertyRW.limit());
            }

            properties.forEach(p ->
            {
                mqttWillPropertyRW.wrap(willPropertyBuffer, willPropertiesSize.get(), willPropertyBuffer.capacity())
                    .userProperty(c -> c.key(p.key()).value(p.value()))
                    .build();
                willPropertiesSize.set(mqttWillPropertyRW.limit());
            });

            will = willMessageRW.wrap(willMessageBuffer, 0, willMessageBuffer.capacity())
                .properties(p -> p.length(willPropertiesSize.get())
                    .value(willPropertyBuffer, 0, willPropertiesSize.get()))
                .topic(willMessage.topic())
                .payload(p -> p.bytes(willMessage.payload().bytes()))
                .build();
        }

        final int propertiesSize0 = propertiesSize;
        final int willSize = will != null ? will.sizeof() : 0;
        // set the WILL flag (and WILL RETAIN when the will carries retain)
        flags |= will != null ? (WILL_FLAG_MASK | ((willMessage.flags() & RETAIN_MASK) != 0 ? WILL_RETAIN_MASK : 0)) : 0;
        final MqttConnectFW connect =
            mqttConnectRW.wrap(writeBuffer, FIELD_OFFSET_PAYLOAD, writeBuffer.capacity())
                .typeAndFlags(0x10)
                .remainingLength(11 + propertiesSize0 + clientId.length() + 2 + willSize)
                .protocolName(MQTT_PROTOCOL_NAME)
                .protocolVersion(MQTT_PROTOCOL_VERSION)
                .flags(flags)
                .keepAlive((int) MILLISECONDS.toSeconds(keepAliveMillis))
                .properties(p -> p.length(propertiesSize0)
                    .value(propertyBuffer, 0, propertiesSize0))
                .clientId(clientId)
                .build();

        doNetworkData(traceId, authorization, 0L, connect);
        // the will message is encoded separately and sent immediately after the
        // CONNECT variable header/payload prefix
        if (will != null)
        {
            doNetworkData(traceId, authorization, 0L, will);
        }
    }

    // Encodes and sends a SUBSCRIBE for the given filters, remembering them by
    // packetId so the SUBACK handler can reconcile the session state.
    // NOTE(review): only subscriptions.get(0).id is encoded as the subscription
    // identifier — assumes all filters in one SUBSCRIBE share the same id.
    private void doEncodeSubscribe(
        long traceId,
        long authorization,
        List subscriptions,
        int packetId)
    {
        int propertiesSize = 0;

        MqttPropertyFW mqttProperty;

        final int subscriptionId = subscriptions.get(0).id;
        if (subscriptionId != 0)
        {
            mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity())
                .subscriptionId(i -> i.set(subscriptionId))
                .build();
            propertiesSize = mqttProperty.limit();
        }

        final int propertiesSize0 = propertiesSize;

        final MutableDirectBuffer encodeBuffer = payloadBuffer;
        final int encodeOffset = 0;
        final int encodeLimit = payloadBuffer.capacity();

        int encodeProgress = encodeOffset;

        for (Subscription s : subscriptions)
        {
            final int flags = calculateSubscribeFlags(s.flags);
            final MqttSubscribePayloadFW subscribePayload =
                mqttSubscribePayloadRW.wrap(encodeBuffer, encodeProgress, encodeLimit)
                    .filter(s.filter)
                    .options(flags)
                    .build();
            encodeProgress = subscribePayload.limit();
        }

        final OctetsFW encodePayload = octetsRO.wrap(encodeBuffer, encodeOffset, encodeProgress);
        // remainingLength: 2 (packetId) + 1 (property length byte) + properties + payload
        final MqttSubscribeFW subscribe =
            mqttSubscribeRW.wrap(writeBuffer, FIELD_OFFSET_PAYLOAD, writeBuffer.capacity())
                .typeAndFlags(0x82)
                .remainingLength(3 + propertiesSize0 + encodePayload.sizeof())
                .packetId(packetId)
                .properties(p -> p.length(propertiesSize0)
                    .value(propertyBuffer, 0, propertiesSize0))
                .payload(encodePayload)
                .build();

        sessionStream.unAckedSubscriptionsByPacketId.put(packetId, subscriptions);
        doNetworkData(traceId, authorization, 0L, subscribe);
    }

    // Encodes and sends an UNSUBSCRIBE for the given filters, remembering them
    // by packetId so the UNSUBACK handler can reconcile the session state.
    private void doEncodeUnsubscribe(
        long traceId,
        long authorization,
        List subscriptions,
        int packetId)
    {
        final MutableDirectBuffer encodeBuffer = payloadBuffer;
        final int encodeOffset = 0;
        final int encodeLimit = payloadBuffer.capacity();

        int encodeProgress = encodeOffset;

        for (Subscription s : subscriptions)
        {
            final MqttUnsubscribePayloadFW unsubscribePayload =
                mqttUnsubscribePayloadRW.wrap(encodeBuffer, encodeProgress, encodeLimit)
                    .filter(s.filter)
                    .build();
            encodeProgress = unsubscribePayload.limit();
        }

        final OctetsFW encodePayload = octetsRO.wrap(encodeBuffer, encodeOffset, encodeProgress);
        // remainingLength: 2 (packetId) + 1 (zero property length) + payload
        final MqttUnsubscribeFW unsubscribe =
            mqttUnsubscribeRW.wrap(writeBuffer, FIELD_OFFSET_PAYLOAD, writeBuffer.capacity())
                .typeAndFlags(0xa2)
                .remainingLength(3 + encodePayload.sizeof())
                .packetId(packetId)
                .properties(p -> p.length(0)
                    .value(propertyBuffer, 0, 0))
                .payload(encodePayload)
                .build();

        sessionStream.unAckedSubscriptionsByPacketId.put(packetId, subscriptions);
        doNetworkData(traceId, authorization, 0L, unsubscribe);
    }

    // Sends a PINGREQ and arms the PINGRESP timeout.
    private void doEncodePingReq(
        long traceId,
        long authorization)
    {
        final MqttPingReqFW pingReq =
            mqttPingReqRW.wrap(writeBuffer, FIELD_OFFSET_PAYLOAD, writeBuffer.capacity())
                .typeAndFlags(0xc0)
                .remainingLength(0x00)
                .build();

        doNetworkData(traceId, authorization, 0L, pingReq);
        doSignalPingRespTimeout();
    }

    // Encodes and sends a DISCONNECT with the given reason code.
    // (body continues beyond this chunk)
    private void doEncodeDisconnect(
        long traceId,
        long authorization,
        int reasonCode)
    {
            int propertiesSize = 0;

            // no DISCONNECT properties are encoded today; effectively-final copy for the lambda below
            final int propertySize0 = propertiesSize;
            final MqttDisconnectFW disconnect =
                mqttDisconnectRW.wrap(writeBuffer, FIELD_OFFSET_PAYLOAD, writeBuffer.capacity())
                    .typeAndFlags(0xe0)
                    .remainingLength(2 + propertySize0)
                    .reasonCode(reasonCode & 0xff)
                    .properties(p -> p.length(propertySize0)
                        .value(propertyBuffer, 0, propertySize0))
                    .build();

            doNetworkData(traceId, authorization, 0L, disconnect);
        }

        // Flushes as much of buffer[offset, limit) to the network as the current encode
        // window permits, parking any unsent remainder in the encode slot.
        private void encodeNetwork(
            long traceId,
            long authorization,
            long budgetId,
            DirectBuffer buffer,
            int offset,
            int limit)
        {
            final int maxLength = limit - offset;
            // available credit = window maximum minus unacknowledged bytes plus already-slotted bytes
            final int encodeWin = encodeMax - (int) (encodeSeq - encodeAck + encodeSlotOffset);
            final int length = Math.min(maxLength, Math.max(encodeWin - encodePad, 0));

            if (length > 0)
            {
                final int reserved = length + encodePad;

                doData(network, originId, routedId, initialId, encodeSeq, encodeAck, encodeMax,
                    traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_OCTETS);

                encodeSeq += reserved;

                assert encodeSeq <= encodeAck + encodeMax :
                    String.format("%d <= %d + %d", encodeSeq, encodeAck, encodeMax);
            }

            final int remaining = maxLength - length;
            if (remaining > 0)
            {
                // not everything fit in the window; buffer the remainder in a pool slot
                if (encodeSlot == NO_SLOT)
                {
                    encodeSlot = bufferPool.acquire(replyId);
                    assert encodeSlotOffset == 0;
                }

                if (encodeSlot == NO_SLOT)
                {
                    // buffer pool exhausted; abandon the connection
                    cleanupNetwork(traceId, authorization);
                }
                else
                {
                    final MutableDirectBuffer encodeBuffer = bufferPool.buffer(encodeSlot);
                    encodeBuffer.putBytes(0, buffer, offset + length, remaining);
                    encodeSlotOffset = remaining;
                }
            }
            else
            {
                cleanupEncodeSlot();

                // fully flushed; with no active streams and decoding disabled, end the network stream
                if (publishStreams.isEmpty() && subscribeStreams.isEmpty() && decoder == decodeIgnoreAll)
                {
                    doNetworkEnd(traceId, authorization);
                }
            }
        }

        // Re-drives the decoder over any bytes previously buffered in the decode slot.
        private void decodeNetwork(
            long traceId)
        {
            if (decodeSlot != NO_SLOT)
            {
                final long authorization = 0L; // TODO;
                final long budgetId = 0L; // TODO

                final DirectBuffer
                buffer = bufferPool.buffer(decodeSlot);
                final int offset = 0;
                final int limit = decodeSlotOffset;
                final int reserved = decodeSlotReserved;

                decodeNetwork(traceId, authorization, budgetId, reserved, buffer, offset, limit);
            }
        }

        // Runs the decoder state machine over buffer[offset, limit), buffering any
        // undecoded remainder in the decode slot and replenishing the network window.
        private void decodeNetwork(
            long traceId,
            long authorization,
            long budgetId,
            int reserved,
            DirectBuffer buffer,
            int offset,
            int limit)
        {
            MqttClientDecoder previous = null;
            int progress = offset;
            // iterate until the decoder reaches a fixed point (no state transition) or input is exhausted
            while (progress <= limit && previous != decoder)
            {
                previous = decoder;
                progress = decoder.decode(this, traceId, authorization, budgetId, buffer, progress, limit);
            }

            if (progress < limit)
            {
                // a partial frame remains; hold it until more network bytes arrive
                if (decodeSlot == NO_SLOT)
                {
                    decodeSlot = bufferPool.acquire(initialId);
                }

                if (decodeSlot == NO_SLOT)
                {
                    // buffer pool exhausted; abandon the connection
                    cleanupNetwork(traceId, authorization);
                }
                else
                {
                    final MutableDirectBuffer slotBuffer = bufferPool.buffer(decodeSlot);
                    slotBuffer.putBytes(0, buffer, progress, limit - progress);
                    decodeSlotOffset = limit - progress;
                    // scale reserved credit proportionally to the unconsumed bytes
                    decodeSlotReserved = (int) ((long) reserved * (limit - progress) / (limit - offset));
                }
            }
            else
            {
                cleanupDecodeSlot();

                // a deferred initial close can now complete since all input is consumed
                if (MqttState.initialClosing(state))
                {
                    state = MqttState.closeInitial(state);
                    cleanupStreamsUsingAbort(traceId, authorization);
                    doNetworkEnd(traceId, authorization);
                }
            }

            if (!MqttState.initialClosed(state))
            {
                doNetworkWindow(traceId, authorization, 0, budgetId, decodeSlotReserved, decodeMax);
            }
        }

        // Abandons the network connection: aborts all application streams, then
        // resets and aborts the network stream itself.
        private void cleanupNetwork(
            long traceId,
            long authorization)
        {
            cleanupStreamsUsingAbort(traceId, authorization);

            doNetworkReset(traceId, authorization);
            doNetworkAbort(traceId, authorization);
        }

        // Abruptly aborts every application stream (publish, subscribe, session).
        private void cleanupStreamsUsingAbort(
            long traceId,
            long authorization)
        {
            publishStreams.forEach(s -> s.cleanupAbort(traceId, authorization));
            subscribeStreams.values().forEach(s -> s.cleanupAbort(traceId, authorization));
            if (sessionStream != null)
            {
                sessionStream.cleanupAbort(traceId,
                    authorization);
            }
        }

        // Gracefully ends every application stream (publish, subscribe, session).
        private void closeStreams(
            long traceId,
            long authorization)
        {
            publishStreams.forEach(s -> s.doPublishEnd(traceId, authorization));
            subscribeStreams.values().forEach(s -> s.doSubscribeEnd(traceId, authorization));
            if (sessionStream != null)
            {
                sessionStream.cleanupEnd(traceId, authorization);
            }
        }

        // Releases the encode budget creditor entry, if one was acquired.
        private void cleanupBudgetCreditor()
        {
            if (encodeBudgetIndex != NO_CREDITOR_INDEX)
            {
                creditor.release(encodeBudgetIndex);
                encodeBudgetIndex = NO_CREDITOR_INDEX;
            }
        }

        // Returns the decode slot to the buffer pool and resets its bookkeeping.
        private void cleanupDecodeSlot()
        {
            if (decodeSlot != NO_SLOT)
            {
                bufferPool.release(decodeSlot);
                decodeSlot = NO_SLOT;
                decodeSlotOffset = 0;
                decodeSlotReserved = 0;
            }
        }

        // Returns the encode slot to the buffer pool and resets its bookkeeping.
        private void cleanupEncodeSlot()
        {
            if (encodeSlot != NO_SLOT)
            {
                bufferPool.release(encodeSlot);
                encodeSlot = NO_SLOT;
                encodeSlotOffset = 0;
                encodeSlotTraceId = 0;
            }
        }

        // (Re)arms the keep-alive timer; keepAliveMillis == 0 disables keep-alive.
        private void doSignalKeepAliveTimeout()
        {
            if (keepAliveMillis > 0)
            {
                keepAliveTimeoutAt = System.currentTimeMillis() + keepAliveMillis;

                if (keepAliveTimeoutId == NO_CANCEL_ID)
                {
                    keepAliveTimeoutId =
                        signaler.signalAt(keepAliveTimeoutAt, originId, routedId, initialId, KEEP_ALIVE_TIMEOUT_SIGNAL, 0);
                }
            }
        }

        // Arms the CONNACK timeout after a CONNECT has been sent.
        private void doSignalConnackTimeout()
        {
            connackTimeoutAt = System.currentTimeMillis() + connackTimeoutMillis;

            if (connackTimeoutId == NO_CANCEL_ID)
            {
                connackTimeoutId = signaler.signalAt(connackTimeoutAt, originId, routedId, initialId, CONNACK_TIMEOUT_SIGNAL, 0);
            }
        }

        // Arms the PINGRESP timeout after a PINGREQ has been sent.
        private void doSignalPingRespTimeout()
        {
            pingRespTimeoutAt = System.currentTimeMillis() + pingRespTimeoutMillis;

            if (pingRespTimeoutId == NO_CANCEL_ID)
            {
                pingRespTimeoutId =
                    signaler.signalAt(pingRespTimeoutAt, originId, routedId, initialId, PINGRESP_TIMEOUT_SIGNAL, 0);
            }
        }

        // Cancels a pending PINGRESP timeout, e.g. once the PINGRESP arrives.
        private void doCancelPingRespTimeout()
        {
            if (pingRespTimeoutId != NO_CANCEL_ID)
            {
                signaler.cancel(pingRespTimeoutId);
                pingRespTimeoutId = NO_CANCEL_ID;
            }
        }
+ + private int calculateSubscribeFlags( + int options) + { + int flags = 0; + if ((options & SEND_RETAINED_FLAG) == 0) + { + flags |= DO_NOT_SEND_RETAINED_MASK; + } + + if ((options & RETAIN_AS_PUBLISHED_FLAG) != 0) + { + flags |= RETAIN_AS_PUBLISHED_MASK; + } + + if ((options & NO_LOCAL_FLAG) != 0) + { + flags |= NO_LOCAL_FLAG_MASK; + } + + + return flags; + } + + private boolean existStreamForId( + int subscriptionId) + { + return sessionStream.subscriptions.stream().anyMatch(s -> s.id == subscriptionId); + } + + private int nextPacketId() + { + final int packetId = packetIdCounter.incrementAndGet(); + if (packetId == Integer.MAX_VALUE) + { + packetIdCounter.set(0); + } + return packetId; + } + } + + private final class MqttSessionStream + { + private final MqttClient client; + private final List subscriptions; + private final Int2ObjectHashMap> unAckedSubscriptionsByPacketId; + private MessageConsumer application; + + private long originId; + private long routedId; + private long initialId; + private long replyId; + private long budgetId; + + private BudgetDebitor debitor; + private long debitorIndex = NO_DEBITOR_INDEX; + + private long initialSeq; + private long initialAck; + private int initialMax; + private int initialPad; + + private long replySeq; + private long replyAck; + private int replyMax; + private long replyBud; + private int replyPad; + private int state; + + MqttSessionStream( + MqttClient client, + MessageConsumer application, + long originId, + long routedId, + long initialId) + { + this.client = client; + this.application = application; + this.subscriptions = new ArrayList<>(); + this.unAckedSubscriptionsByPacketId = new Int2ObjectHashMap<>(); + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + } + + private void onSession( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final 
BeginFW begin = beginRO.wrap(buffer, index, index + length); + onSessionBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onSessionData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onSessionEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onSessionAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onSessionWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onSessionReset(reset); + break; + } + } + + private void onSessionWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long authorization = window.authorization(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final boolean wasOpen = MqttState.replyOpened(state); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + replyBud = budgetId; + replyPad = padding; + state = MqttState.openReply(state); + + assert initialAck <= initialSeq; + + if (budgetId != 0L && debitorIndex == NO_DEBITOR_INDEX) + { + debitor = supplyDebitor.apply(budgetId); + debitorIndex = debitor.acquire(budgetId, initialId, client::decodeNetwork); + } + + if (MqttState.initialClosing(state) && !MqttState.initialClosed(state)) + { + doSessionEnd(traceId, authorization); + } + + if (!wasOpen) + { + doSessionData(traceId, authorization, 0, EMPTY_OCTETS, EMPTY_OCTETS); + } + } + + private void onSessionReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + final long authorization = 
reset.authorization(); + + state = MqttState.closeReply(state); + + client.doNetworkReset(traceId, authorization); + } + + private void onSessionBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + final OctetsFW extension = begin.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + initialAck = acknowledge; + initialMax = maximum; + state = MqttState.openingInitial(state); + + + client.doNetworkBegin(traceId, authorization, affinity); + + final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); + + assert mqttBeginEx.kind() == MqttBeginExFW.KIND_SESSION; + final MqttSessionBeginExFW mqttSessionBeginEx = mqttBeginEx.session(); + + client.clientId = mqttSessionBeginEx.clientId().asString(); + client.flags = mqttSessionBeginEx.flags(); + client.sessionExpiry = mqttSessionBeginEx.expiry(); + + if (!isSetWillFlag(mqttSessionBeginEx.flags())) + { + client.doEncodeConnect(traceId, authorization, client.clientId, client.flags, client.sessionExpiry, null); + client.doSignalConnackTimeout(); + } + doSessionWindow(traceId, authorization, client.encodeSlotOffset, encodeBudgetMax); + } + + private void onSessionData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final int reserved = data.reserved(); + final long authorization = data.authorization(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge <= initialAck; + + initialSeq = sequence + reserved; + client.encodeSharedBudget -= reserved; + + assert 
initialAck <= initialSeq; + + if (initialSeq > initialAck + initialMax) + { + doSessionReset(traceId, authorization); + client.doNetworkAbort(traceId, authorization); + } + else + { + + final ExtensionFW dataEx = extension.get(extensionRO::tryWrap); + final MqttDataExFW mqttDataEx = + dataEx != null && dataEx.typeId() == mqttTypeId ? extension.get(mqttDataExRO::tryWrap) : null; + final MqttSessionDataExFW mqttSessionDataEx = + mqttDataEx != null && mqttDataEx.kind() == MqttDataExFW.KIND_SESSION ? mqttDataEx.session() : null; + + final DirectBuffer buffer = payload.buffer(); + final int offset = payload.offset(); + final int limit = payload.limit(); + + switch (mqttSessionDataEx.kind().get()) + { + case WILL: + MqttWillMessageFW willMessage = mqttWillMessageRO.tryWrap(buffer, offset, limit); + client.doEncodeConnect(traceId, authorization, client.clientId, client.flags, + client.sessionExpiry, willMessage); + client.doSignalConnackTimeout(); + break; + case STATE: + MqttSessionStateFW sessionState = mqttSessionStateRO.tryWrap(buffer, offset, limit); + + final List newSubscribeState = new ArrayList<>(); + sessionState.subscriptions().forEach(filter -> + { + Subscription subscription = new Subscription(); + subscription.id = (int) filter.subscriptionId(); + subscription.filter = filter.pattern().asString(); + subscription.flags = filter.flags(); + subscription.qos = filter.qos(); + newSubscribeState.add(subscription); + }); + + + final List newSubscriptions = newSubscribeState.stream() + .filter(s -> !subscriptions.contains(s)) + .collect(Collectors.toList()); + + final List oldSubscriptions = subscriptions.stream() + .filter(s -> !newSubscribeState.contains(s)) + .collect(Collectors.toList()); + final int packetId = client.nextPacketId(); + + if (newSubscriptions.size() > 0) + { + client.doEncodeSubscribe(traceId, authorization, newSubscriptions, packetId); + } + if (oldSubscriptions.size() > 0) + { + client.doEncodeUnsubscribe(traceId, authorization, 
oldSubscriptions, packetId); + } + client.sessionStream.subscriptions.addAll(newSubscriptions); + client.sessionStream.subscriptions.removeAll(oldSubscriptions); + break; + } + } + } + + private void onSessionEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = MqttState.closeInitial(state); + + assert initialAck <= initialSeq; + + client.doEncodeDisconnect(traceId, authorization, SUCCESS); + client.doNetworkEnd(traceId, authorization); + + doSessionEnd(traceId, authorization); + } + + private void onSessionAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = MqttState.closeInitial(state); + + assert initialAck <= initialSeq; + + client.doNetworkAbort(traceId, authorization); + } + + private void doSessionData( + long traceId, + long authorization, + int reserved, + Flyweight dataEx, + Flyweight payload) + { + final DirectBuffer buffer = payload.buffer(); + final int offset = payload.offset(); + final int limit = payload.limit(); + final int length = limit - offset; + + if (!MqttState.closed(state)) + { + doData(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, reserved, buffer, offset, length, dataEx); + + replySeq += reserved; + assert replySeq <= replyAck + replyMax; + } + } + + private void cleanupAbort( + long traceId, + long authorization) + { + doSessionAbort(traceId, authorization); + doSessionReset(traceId, authorization); + } + + private void cleanupEnd( + long traceId, + long authorization) + { 
+ doSessionEnd(traceId, authorization); + } + + private void doSessionAbort( + long traceId, + long authorization) + { + if (!MqttState.replyClosed(state)) + { + state = MqttState.closeReply(state); + + doAbort(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_OCTETS); + } + } + + private void doSessionWindow( + long traceId, + long authorization, + int minInitialNoAck, + int minInitialMax) + { + if (MqttState.replyOpened(client.state)) + { + final long newInitialAck = Math.max(initialSeq - minInitialNoAck, initialAck); + + if (newInitialAck > initialAck || minInitialMax > initialMax || !MqttState.initialOpened(state)) + { + initialAck = newInitialAck; + assert initialAck <= initialSeq; + + initialMax = minInitialMax; + + doWindow(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, client.encodeBudgetId, PUBLISH_FRAMING); + } + } + } + + private void doSessionReset( + long traceId, + long authorization, + int reasonCode) + { + if (!MqttState.initialClosed(state)) + { + Flyweight resetEx = mqttResetExRW + .wrap(extBuffer, 0, extBuffer.capacity()) + .typeId(mqttTypeId) + .reasonCode(reasonCode) + .build(); + + state = MqttState.closeInitial(state); + + doReset(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, resetEx); + } + } + + private void doSessionReset( + long traceId, + long authorization) + { + if (!MqttState.initialClosed(state)) + { + state = MqttState.closeInitial(state); + + doReset(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_OCTETS); + } + } + + public void doSessionBegin( + long traceId, + long authorization, + long affinity, + Flyweight extension) + { + state = MqttState.openingReply(state); + + doBegin(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, extension); + } 
        // Half-closes the session reply stream toward the application, if still open.
        private void doSessionEnd(
            long traceId,
            long authorization)
        {
            if (!MqttState.replyClosed(state))
            {
                state = MqttState.closeReply(state);

                doEnd(application, originId, routedId, replyId, replySeq, replyAck, replyMax,
                    traceId, authorization, EMPTY_OCTETS);
            }
        }
    }

    private class MqttSubscribeStream
    {
        private final MessageConsumer application;
        private final long originId;
        private final long routedId;
        private final long initialId;
        private final long replyId;
        private long budgetId;

        private MqttClient client;
        private BudgetDebitor debitor;
        private long debitorIndex = NO_DEBITOR_INDEX;

        private long initialSeq;
        private long initialAck;
        private int initialMax;
        private int initialPad;

        private long replySeq;
        private long replyAck;
        private int replyMax;
        private int replyPad;

        private int state;
        // generic parameter restored; extraction had stripped it — TODO confirm against upstream
        private List<Subscription> subscriptions;

        MqttSubscribeStream(
            MqttClient client,
            MessageConsumer application,
            long originId,
            long routedId,
            long initialId)
        {
            this.client = client;
            this.application = application;
            this.subscriptions = new ArrayList<>();
            this.originId = originId;
            this.routedId = routedId;
            this.initialId = initialId;
            this.replyId = supplyReplyId.applyAsLong(initialId);
        }

        // Dispatches stream frames received on the subscribe application stream.
        private void onSubscribe(
            int msgTypeId,
            DirectBuffer buffer,
            int index,
            int length)
        {
            switch (msgTypeId)
            {
            case BeginFW.TYPE_ID:
                final BeginFW begin = beginRO.wrap(buffer, index, index + length);
                onSubscribeBegin(begin);
                break;
            case FlushFW.TYPE_ID:
                final FlushFW flush = flushRO.wrap(buffer, index, index + length);
                onSubscribeFlush(flush);
                break;
            case EndFW.TYPE_ID:
                final EndFW end = endRO.wrap(buffer, index, index + length);
                onSubscribeEnd(end);
                break;
            case AbortFW.TYPE_ID:
                final AbortFW abort = abortRO.wrap(buffer, index, index + length);
                onSubscribeAbort(abort);
                break;
            case WindowFW.TYPE_ID:
                final WindowFW window = windowRO.wrap(buffer, index, index +
length); + onSubscribeWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onSubscribeReset(reset); + break; + } + } + + private void onSubscribeBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + final OctetsFW extension = begin.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + initialAck = acknowledge; + initialMax = maximum; + state = MqttState.openingInitial(state); + + final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); + + assert mqttBeginEx.kind() == MqttBeginExFW.KIND_SUBSCRIBE; + final MqttSubscribeBeginExFW mqttSubscribeBeginEx = mqttBeginEx.subscribe(); + + final Array32FW filters = mqttSubscribeBeginEx.filters(); + + filters.forEach(filter -> + { + Subscription subscription = new Subscription(); + subscription.id = (int) filter.subscriptionId(); + subscription.filter = filter.pattern().asString(); + subscription.flags = filter.flags(); + subscription.qos = filter.qos(); + subscriptions.add(subscription); + }); + final int qos = subscriptions.get(0).qos; + client.subscribeStreams.put(qos, this); + + doSubscribeBegin(traceId, authorization, affinity); + doSubscribeWindow(traceId, authorization, client.encodeSlotOffset, encodeBudgetMax); + } + + private void onSubscribeFlush( + FlushFW flush) + { + final long sequence = flush.sequence(); + final long acknowledge = flush.acknowledge(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + + assert initialAck <= initialSeq; + + final OctetsFW extension = flush.extension(); + final MqttFlushExFW mqttFlushEx = 
extension.get(mqttFlushExRO::tryWrap); + + assert mqttFlushEx.kind() == MqttFlushExFW.KIND_SUBSCRIBE; + final MqttSubscribeFlushExFW mqttSubscribeFlushEx = mqttFlushEx.subscribe(); + + Array32FW filters = mqttSubscribeFlushEx.filters(); + + final List newSubscribeState = new ArrayList<>(); + filters.forEach(f -> + { + Subscription subscription = new Subscription(); + subscription.id = (int) f.subscriptionId(); + subscription.filter = f.pattern().asString(); + subscription.flags = f.flags(); + subscription.qos = f.qos(); + newSubscribeState.add(subscription); + }); + + this.subscriptions = newSubscribeState; + } + + private void doSubscribeBegin( + long traceId, + long authorization, + long affinity) + { + state = MqttState.openingReply(state); + + doBegin(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, EMPTY_OCTETS); + } + + + private void doSubscribeAbort( + long traceId, + long authorization) + { + if (!MqttState.replyClosed(state)) + { + state = MqttState.closeReply(state); + + doAbort(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_OCTETS); + } + } + + private void doSubscribeEnd( + long traceId, + long authorization) + { + if (!MqttState.replyClosed(state)) + { + state = MqttState.closeReply(state); + + doEnd(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_OCTETS); + } + } + + private void onSubscribeReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + final long authorization = reset.authorization(); + + state = MqttState.closeReply(state); + + client.doNetworkReset(traceId, authorization); + } + + private void onSubscribeWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long authorization = window.authorization(); 
+ final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + replyPad = padding; + state = MqttState.openReply(state); + + client.doNetworkWindow(traceId, authorization, padding, budgetId); + client.decodeNetwork(traceId); + } + + private void onSubscribeEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = MqttState.closeInitial(state); + + assert initialAck <= initialSeq; + + final int packetId = client.nextPacketId(); + client.doEncodeUnsubscribe(traceId, authorization, subscriptions, packetId); + doSubscribeEnd(traceId, authorization); + } + + private void onSubscribeAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = MqttState.closeInitial(state); + + assert initialAck <= initialSeq; + + client.doNetworkAbort(traceId, authorization); + } + + private void cleanupAbort( + long traceId, + long authorization) + { + doSubscribeAbort(traceId, authorization); + doSubscribeReset(traceId, authorization); + } + + + private void doSubscribeWindow( + long traceId, + long authorization, + int minInitialNoAck, + int minInitialMax) + { + final long newInitialAck = Math.max(initialSeq - minInitialNoAck, initialAck); + + if (newInitialAck > initialAck || minInitialMax > initialMax || !MqttState.initialOpened(state)) + { + initialAck = newInitialAck; + assert initialAck <= 
initialSeq; + + initialMax = minInitialMax; + + state = MqttState.openInitial(state); + + doWindow(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, client.encodeBudgetId, PUBLISH_FRAMING); + } + } + + private void doSubscribeReset( + long traceId, + long authorization) + { + if (!MqttState.initialClosed(state)) + { + state = MqttState.closeInitial(state); + + doReset(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_OCTETS); + } + } + + + private void doSubscribeData( + long traceId, + long authorization, + int reserved, + OctetsFW payload, + Flyweight extension) + { + assert MqttState.replyOpening(state); + + final DirectBuffer buffer = payload.buffer(); + final int offset = payload.offset(); + final int limit = payload.limit(); + final int length = limit - offset; + assert reserved >= length + replyPad; + + doData(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, budgetId, reserved, buffer, offset, length, extension); + + replySeq += reserved; + + assert replySeq <= replyAck + replyMax; + } + } + + + private class MqttPublishStream + { + private final MessageConsumer application; + private final long originId; + private final long routedId; + private final long initialId; + private final long replyId; + private long budgetId; + + private MqttClient client; + private BudgetDebitor debitor; + private long debitorIndex = NO_DEBITOR_INDEX; + + private long initialSeq; + private long initialAck; + private int initialMax; + private int initialPad; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private int state; + + private long publishExpiresId = NO_CANCEL_ID; + private long publishExpiresAt; + private String topic; + + + MqttPublishStream( + MqttClient client, + MessageConsumer application, + long originId, + long routedId, + long initialId) + { + 
this.client = client; + this.application = application; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + } + + private void onPublish( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onPublishBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onPublishData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onPublishEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onPublishAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onPublishWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onPublishReset(reset); + break; + case SignalFW.TYPE_ID: + final SignalFW signal = signalRO.wrap(buffer, index, index + length); + onPublishSignal(signal); + break; + } + } + + private void onPublishBegin( + BeginFW begin) + { + final long sequence = begin.sequence(); + final long acknowledge = begin.acknowledge(); + final int maximum = begin.maximum(); + final long traceId = begin.traceId(); + final long authorization = begin.authorization(); + final long affinity = begin.affinity(); + final OctetsFW extension = begin.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; + + initialSeq = sequence; + initialAck = acknowledge; + initialMax = maximum; + state = MqttState.openingInitial(state); + + final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); + + assert mqttBeginEx.kind() == MqttBeginExFW.KIND_PUBLISH; + final MqttPublishBeginExFW mqttPublishBeginEx = 
mqttBeginEx.publish(); + + this.topic = mqttPublishBeginEx.topic().asString(); + client.publishStreams.add(this); + + doPublishBegin(traceId, authorization, affinity); + doPublishWindow(traceId, authorization, client.encodeSlotOffset, encodeBudgetMax); + } + + private void onPublishData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final int reserved = data.reserved(); + final long authorization = data.authorization(); + final int flags = data.flags(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge <= initialAck; + + initialSeq = sequence + reserved; + client.encodeSharedBudget -= reserved; + + assert initialAck <= initialSeq; + + if (initialSeq > initialAck + initialMax) + { + doPublishAbort(traceId, authorization); + client.doNetworkAbort(traceId, authorization); + } + else + { + if (payload != null && payload.sizeof() <= maximumPacketSize) + { + client.doEncodePublish(traceId, authorization, flags, payload, extension, topic); + } + else + { + droppedHandler.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + doPublishWindow(traceId, authorization, client.encodeSlotOffset, encodeBudgetMax); + } + } + + private void onPublishEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + final long authorization = end.authorization(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = MqttState.closeInitial(state); + + assert initialAck <= initialSeq; + + doPublishEnd(traceId, authorization); + } + + private void onPublishAbort( + AbortFW abort) + { + final long sequence = abort.sequence(); + final long acknowledge = abort.acknowledge(); + final long traceId = 
abort.traceId(); + final long authorization = abort.authorization(); + + assert acknowledge <= sequence; + assert sequence >= initialSeq; + + initialSeq = sequence; + state = MqttState.closeInitial(state); + + assert initialAck <= initialSeq; + + client.doNetworkAbort(traceId, authorization); + } + + private void onPublishWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long authorization = window.authorization(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + + assert acknowledge <= sequence; + assert sequence <= replySeq; + assert acknowledge >= replyAck; + assert maximum >= replyMax; + + replyAck = acknowledge; + replyMax = maximum; + replyPad = padding; + state = MqttState.openReply(state); + + client.doNetworkWindow(traceId, authorization, padding, budgetId); + } + + private void onPublishReset( + ResetFW reset) + { + final long traceId = reset.traceId(); + final long authorization = reset.authorization(); + + state = MqttState.closeReply(state); + + client.doNetworkReset(traceId, authorization); + } + + private void onPublishSignal( + SignalFW signal) + { + final int signalId = signal.signalId(); + + switch (signalId) + { + case PUBLISH_EXPIRED_SIGNAL: + onPublishExpiredSignal(signal); + break; + default: + break; + } + } + + private void onPublishExpiredSignal( + SignalFW signal) + { + final long traceId = signal.traceId(); + final long authorization = signal.authorization(); + + final long now = System.currentTimeMillis(); + if (now >= publishExpiresAt) + { + doPublishEnd(traceId, authorization); + } + else + { + publishExpiresId = NO_CANCEL_ID; + doSignalPublishExpiration(); + } + } + + + private void doPublishBegin( + long traceId, + long authorization, + long affinity) + { + state = MqttState.openingReply(state); + + doBegin(application, originId, routedId, 
replyId, replySeq, replyAck, replyMax, + traceId, authorization, affinity, EMPTY_OCTETS); + } + + private void doPublishAbort( + long traceId, + long authorization) + { + if (!MqttState.replyClosed(state)) + { + state = MqttState.closeReply(state); + + doAbort(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, EMPTY_OCTETS); + } + } + + private void doSignalPublishExpiration() + { + publishExpiresAt = System.currentTimeMillis() + publishTimeoutMillis; + + if (publishExpiresId == NO_CANCEL_ID) + { + publishExpiresId = + signaler.signalAt(publishExpiresAt, originId, routedId, initialId, PUBLISH_EXPIRED_SIGNAL, 0); + } + } + + private void doCancelPublishExpiration() + { + if (publishExpiresId != NO_CANCEL_ID) + { + signaler.cancel(publishExpiresId); + publishExpiresId = NO_CANCEL_ID; + } + } + + private void doPublishWindow( + long traceId, + long authorization, + int minInitialNoAck, + int minInitialMax) + { + final long newInitialAck = Math.max(initialSeq - minInitialNoAck, initialAck); + + if (newInitialAck > initialAck || minInitialMax > initialMax || !MqttState.initialOpened(state)) + { + initialAck = newInitialAck; + assert initialAck <= initialSeq; + + initialMax = minInitialMax; + + state = MqttState.openInitial(state); + + doWindow(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, client.encodeBudgetId, PUBLISH_FRAMING); + } + } + + private void doPublishReset( + long traceId, + long authorization) + { + if (!MqttState.initialClosed(state)) + { + state = MqttState.closeInitial(state); + + doReset(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_OCTETS); + } + } + + private void doPublishEnd( + long traceId, + long authorization) + { + if (!MqttState.replyClosed(state)) + { + state = MqttState.closeReply(state); + + doEnd(application, originId, routedId, replyId, replySeq, replyAck, replyMax, + 
traceId, authorization, EMPTY_OCTETS); + } + } + + private void cleanupAbort( + long traceId, + long authorization) + { + doPublishAbort(traceId, authorization); + doPublishReset(traceId, authorization); + doCancelPublishExpiration(); + } + } + + private static boolean isSetSessionExpiryInterval( + int flags) + { + return (flags & CONNACK_SESSION_EXPIRY_MASK) != 0; + } + + private static boolean isSetTopicAliasMaximum( + int flags) + { + return (flags & CONNACK_TOPIC_ALIAS_MAXIMUM_MASK) != 0; + } + + private static boolean isSetMaximumQos( + int flags) + { + return (flags & CONNACK_MAXIMUM_QOS_MASK) != 0; + } + + private static boolean isSetMaximumPacketSize( + int flags) + { + return (flags & CONNACK_MAXIMUM_PACKET_SIZE_MASK) != 0; + } + + private static boolean isSetRetainAvailable( + int flags) + { + return (flags & CONNACK_RETAIN_AVAILABLE_MASK) != 0; + } + + private static boolean isSetAssignedClientId( + int flags) + { + return (flags & CONNACK_ASSIGNED_CLIENT_IDENTIFIER_MASK) != 0; + } + + private static boolean isSetWildcardSubscriptions( + int flags) + { + return (flags & CONNACK_WILDCARD_SUBSCRIPTION_AVAILABLE_MASK) != 0; + } + + private static boolean isSetSubscriptionIdentifiers( + int flags) + { + return (flags & CONNACK_SUBSCRIPTION_IDENTIFIERS_MASK) != 0; + } + + private static boolean isSetSharedSubscriptions( + int flags) + { + return (flags & CONNACK_SHARED_SUBSCRIPTION_AVAILABLE_MASK) != 0; + } + + private static boolean isSetServerKeepAlive( + int flags) + { + return (flags & CONNACK_KEEP_ALIVE_MASK) != 0; + } + + private static boolean isSetWillFlag( + int flags) + { + return (flags & MqttSessionFlags.WILL.value() << 1) != 0; + } + + private static int decodeConnackFlags( + int flags) + { + int reasonCode = SUCCESS; + + if ((flags & CONNACK_RESERVED_FLAGS_MASK) != 0) + { + reasonCode = MALFORMED_PACKET; + } + + return reasonCode; + } + + private final class MqttPublishHeader + { + private String topic; + private int flags; + private int 
expiryInterval = DEFAULT_EXPIRY_INTERVAL; + private String16FW contentType = NULL_STRING; + private MqttPayloadFormat payloadFormat = DEFAULT_FORMAT; + private String16FW responseTopic = NULL_STRING; + private OctetsFW correlationData = null; + private int qos; + private boolean retained = false; + + private MqttPublishHeader reset() + { + this.topic = null; + this.flags = 0; + this.expiryInterval = DEFAULT_EXPIRY_INTERVAL; + this.contentType = NULL_STRING; + this.payloadFormat = DEFAULT_FORMAT; + this.responseTopic = NULL_STRING; + this.correlationData = null; + + return this; + } + + private int decode( + MqttClient client, + String16FW topicName, + MqttPropertiesFW properties, + int typeAndFlags) + { + this.topic = topicName.asString(); + int reasonCode = SUCCESS; + userPropertiesRW.wrap(userPropertiesBuffer, 0, userPropertiesBuffer.capacity()); + subscriptionIdsRW.wrap(subscriptionIdsBuffer, 0, subscriptionIdsBuffer.capacity()); + + final OctetsFW propertiesValue = properties.value(); + final DirectBuffer decodeBuffer = propertiesValue.buffer(); + final int decodeOffset = propertiesValue.offset(); + final int decodeLimit = propertiesValue.limit(); + + if (topic == null) + { + reasonCode = PROTOCOL_ERROR; + } + else + { + flags = calculatePublishFlags(typeAndFlags); + qos = calculatePublishQos(typeAndFlags); + + int alias = 0; + if (qos > client.maximumQos) + { + reasonCode = QOS_NOT_SUPPORTED; + } + else if (retained && !isRetainAvailable(client.capabilities)) + { + reasonCode = RETAIN_NOT_SUPPORTED; + } + else + { + decode: + for (int decodeProgress = decodeOffset; decodeProgress < decodeLimit; ) + { + final MqttPropertyFW mqttProperty = mqttPropertyRO.wrap(decodeBuffer, decodeProgress, decodeLimit); + switch (mqttProperty.kind()) + { + case KIND_EXPIRY_INTERVAL: + expiryInterval = mqttProperty.expiryInterval(); + break; + case KIND_CONTENT_TYPE: + final String16FW mContentType = mqttProperty.contentType(); + if (mContentType.value() != null) + { + final int 
offset = mContentType.offset(); + final int limit = mContentType.limit(); + + contentType = contentTypeRO.wrap(mContentType.buffer(), offset, limit); + } + break; + case KIND_TOPIC_ALIAS: + if (alias != 0) + { + reasonCode = PROTOCOL_ERROR; + break decode; + } + + alias = mqttProperty.topicAlias() & 0xFFFF; + + if (alias <= 0 || alias > client.topicAliasMaximum) + { + reasonCode = TOPIC_ALIAS_INVALID; + break decode; + } + + if (topic.isEmpty()) + { + if (!client.topicAliases.containsKey(alias)) + { + reasonCode = PROTOCOL_ERROR; + break decode; + } + topic = client.topicAliases.get(alias); + } + else + { + client.topicAliases.put(alias, topic); + } + break; + case KIND_SUBSCRIPTION_ID: + subscriptionIdsRW.item(i -> i.set(mqttProperty.subscriptionId().value())); + break; + case KIND_PAYLOAD_FORMAT: + payloadFormat = MqttPayloadFormat.valueOf(mqttProperty.payloadFormat()); + break; + case KIND_RESPONSE_TOPIC: + final String16FW mResponseTopic = mqttProperty.responseTopic(); + if (mResponseTopic.value() != null) + { + final int offset = mResponseTopic.offset(); + final int limit = mResponseTopic.limit(); + + responseTopic = responseTopicRO.wrap(mResponseTopic.buffer(), offset, limit); + } + break; + case KIND_CORRELATION_DATA: + correlationData = mqttProperty.correlationData().bytes(); + break; + case KIND_USER_PROPERTY: + final MqttUserPropertyFW userProperty = mqttProperty.userProperty(); + userPropertiesRW.item(c -> c.key(userProperty.key()).value(userProperty.value())); + break; + default: + reasonCode = MALFORMED_PACKET; + break decode; + } + decodeProgress = mqttProperty.limit(); + } + } + } + + return reasonCode; + } + private int calculatePublishQos( + int networkTypeAndFlags) + { + int qos = 0; + if ((networkTypeAndFlags & PUBLISH_QOS1_MASK) != 0) + { + qos = 1; + } + else if ((networkTypeAndFlags & PUBLISH_QOS2_MASK) != 0) + { + qos = 2; + } + return qos; + } + + private int calculatePublishFlags( + int networkTypeAndFlags) + { + int flags = 0; + + if 
((networkTypeAndFlags & RETAIN_MASK) != 0) + { + flags |= RETAIN_FLAG; + retained = true; + } + return flags; + } + } + + private boolean isRetainAvailable( + int capabilities) + { + return (capabilities & 1 << MqttServerCapabilities.RETAIN.value()) != 0; + } + + private final class Subscription + { + private int id = 0; + private String filter; + private int qos; + private int flags; + private int reasonCode; + + + @Override + public boolean equals(Object obj) + { + if (obj == this) + { + return true; + } + if (!(obj instanceof Subscription)) + { + return false; + } + Subscription other = (Subscription) obj; + return this.id == other.id && Objects.equals(this.filter, other.filter) && + this.qos == other.qos && this.flags == other.flags && this.reasonCode == other.reasonCode; + } + + @Override + public int hashCode() + { + return Objects.hash(this.id, this.filter, this.qos, this.flags, this.reasonCode); + } + } } + diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index d0126aed5b..80ae2afed8 100644 --- a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -19,6 +19,7 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.BAD_USER_NAME_OR_PASSWORD; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.CLIENT_IDENTIFIER_NOT_VALID; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.DISCONNECT_WITH_WILL_MESSAGE; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.GRANTED_QOS_2; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.KEEP_ALIVE_TIMEOUT; import static 
io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.MALFORMED_PACKET; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.NORMAL_DISCONNECT; @@ -112,9 +113,9 @@ import io.aklivity.zilla.runtime.binding.mqtt.internal.types.Flyweight; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttBinaryFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttPayloadFormat; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttPayloadFormatFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttQoS; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttSessionStateFW; -import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttTopicFilterFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.MqttWillMessageFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.OctetsFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.String16FW; @@ -147,8 +148,8 @@ import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttFlushExFW; -import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttPublishDataExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttResetExFW; +import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttServerCapabilities; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttSessionBeginExFW; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.MqttSessionDataKind; import io.aklivity.zilla.runtime.binding.mqtt.internal.types.stream.ResetFW; @@ -184,7 +185,10 @@ public final class MqttServerFactory implements MqttStreamFactory private static final int NO_LOCAL_FLAG_MASK = 0b0000_0100; private static final int RETAIN_AS_PUBLISHED_MASK = 0b0000_1000; private static final int 
RETAIN_HANDLING_MASK = 0b0011_0000; - private static final int BASIC_AUTHENTICATION_MASK = 0b1100_0000; + private static final int RETAIN_AVAILABLE_MASK = 1 << MqttServerCapabilities.RETAIN.value(); + private static final int WILDCARD_AVAILABLE_MASK = 1 << MqttServerCapabilities.WILDCARD.value(); + private static final int SUBSCRIPTION_IDS_AVAILABLE_MASK = 1 << MqttServerCapabilities.SUBSCRIPTION_IDS.value(); + private static final int SHARED_SUBSCRIPTIONS_AVAILABLE_MASK = 1 << MqttServerCapabilities.SHARED_SUBSCRIPTIONS.value(); private static final int WILL_FLAG_MASK = 0b0000_0100; private static final int CLEAN_START_FLAG_MASK = 0b0000_0010; @@ -235,7 +239,6 @@ public final class MqttServerFactory implements MqttStreamFactory private final ResetFW.Builder resetRW = new ResetFW.Builder(); private final FlushFW.Builder flushRW = new FlushFW.Builder(); - private final MqttPublishDataExFW mqttPublishDataExRO = new MqttPublishDataExFW(); private final MqttDataExFW mqttSubscribeDataExRO = new MqttDataExFW(); private final MqttResetExFW mqttResetExRO = new MqttResetExFW(); private final MqttBeginExFW mqttBeginExRO = new MqttBeginExFW(); @@ -244,7 +247,6 @@ public final class MqttServerFactory implements MqttStreamFactory private final MqttBeginExFW.Builder mqttSubscribeBeginExRW = new MqttBeginExFW.Builder(); private final MqttBeginExFW.Builder mqttSessionBeginExRW = new MqttBeginExFW.Builder(); private final MqttDataExFW.Builder mqttPublishDataExRW = new MqttDataExFW.Builder(); - private final MqttDataExFW.Builder mqttSubscribeDataExRW = new MqttDataExFW.Builder(); private final MqttDataExFW.Builder mqttSessionDataExRW = new MqttDataExFW.Builder(); private final MqttFlushExFW.Builder mqttFlushExRW = new MqttFlushExFW.Builder(); private final MqttWillMessageFW.Builder mqttWillMessageRW = new MqttWillMessageFW.Builder(); @@ -267,20 +269,11 @@ public final class MqttServerFactory implements MqttStreamFactory private final MqttPropertyFW mqttPropertyRO = new 
MqttPropertyFW(); private final MqttPropertyFW.Builder mqttPropertyRW = new MqttPropertyFW.Builder(); - private final MqttPropertiesFW mqttPropertiesRO = new MqttPropertiesFW(); private final MqttSessionStateFW mqttSessionStateRO = new MqttSessionStateFW(); - private final String16FW.Builder clientIdRW = new String16FW.Builder(BIG_ENDIAN); - - private final String16FW clientIdRO = new String16FW(BIG_ENDIAN); private final String16FW contentTypeRO = new String16FW(BIG_ENDIAN); private final String16FW responseTopicRO = new String16FW(BIG_ENDIAN); - private final String16FW willTopicRO = new String16FW(BIG_ENDIAN); private final String16FW usernameRO = new String16FW(BIG_ENDIAN); - - private final OctetsFW.Builder sessionPayloadRW = new OctetsFW.Builder(); - - private final BinaryFW willPayloadRO = new BinaryFW(); private final BinaryFW passwordRO = new BinaryFW(); private final MqttPublishHeader mqttPublishHeaderRO = new MqttPublishHeader(); @@ -295,9 +288,6 @@ public final class MqttServerFactory implements MqttStreamFactory private final MqttDisconnectFW.Builder mqttDisconnectRW = new MqttDisconnectFW.Builder(); private final Array32FW.Builder userPropertiesRW = new Array32FW.Builder<>(new MqttUserPropertyFW.Builder(), new MqttUserPropertyFW()); - - private final Array32FW.Builder topicFiltersRW = - new Array32FW.Builder<>(new MqttTopicFilterFW.Builder(), new MqttTopicFilterFW()); private final Array32FW.Builder willUserPropertiesRW = new Array32FW.Builder<>(new MqttUserPropertyFW.Builder(), new MqttUserPropertyFW()); @@ -314,7 +304,6 @@ public final class MqttServerFactory implements MqttStreamFactory private final MqttServerDecoder decodeUnknownType = this::decodeUnknownType; private final Map decodersByPacketType; - private final boolean session; private final String serverRef; private int maximumPacketSize; @@ -336,16 +325,15 @@ public final class MqttServerFactory implements MqttStreamFactory private final MutableDirectBuffer writeBuffer; private final 
MutableDirectBuffer extBuffer; private final MutableDirectBuffer dataExtBuffer; - private final MutableDirectBuffer clientIdBuffer; - private final MutableDirectBuffer willDataExtBuffer; private final MutableDirectBuffer payloadBuffer; private final MutableDirectBuffer propertyBuffer; private final MutableDirectBuffer sessionExtBuffer; private final MutableDirectBuffer sessionStateBuffer; private final MutableDirectBuffer userPropertiesBuffer; private final MutableDirectBuffer willMessageBuffer; - private final MutableDirectBuffer willPropertyBuffer; private final MutableDirectBuffer willUserPropertiesBuffer; + + private final ByteBuffer charsetBuffer; private final BufferPool bufferPool; private final BudgetCreditor creditor; private final Signaler signaler; @@ -366,12 +354,7 @@ public final class MqttServerFactory implements MqttStreamFactory private final short keepAliveMinimum; private final short keepAliveMaximum; - private final byte maximumQos; - private final byte retainedMessages; private final short topicAliasMaximumLimit; - private final byte wildcardSubscriptions; - private final byte subscriptionIdentifiers; - private final byte sharedSubscriptions; private final boolean noLocal; private final int sessionExpiryGracePeriod; private final Supplier supplyClientId; @@ -387,15 +370,13 @@ public MqttServerFactory( this.writeBuffer = context.writeBuffer(); this.extBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); this.dataExtBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); - this.clientIdBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); - this.willDataExtBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); this.propertyBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); this.userPropertiesBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); + this.charsetBuffer = ByteBuffer.wrap(new byte[writeBuffer.capacity()]); this.payloadBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); 
this.sessionExtBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); this.sessionStateBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); this.willMessageBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); - this.willPropertyBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); this.willUserPropertiesBuffer = new UnsafeBuffer(new byte[writeBuffer.capacity()]); this.bufferPool = context.bufferPool(); this.creditor = context.creditor(); @@ -414,13 +395,7 @@ public MqttServerFactory( this.connectTimeoutMillis = SECONDS.toMillis(config.connectTimeout()); this.keepAliveMinimum = config.keepAliveMinimum(); this.keepAliveMaximum = config.keepAliveMaximum(); - this.maximumQos = config.maximumQos(); this.maximumPacketSize = writeBuffer.capacity(); - this.retainedMessages = config.retainAvailable() ? (byte) 1 : 0; - this.wildcardSubscriptions = config.wildcardSubscriptionAvailable() ? (byte) 1 : 0; - this.subscriptionIdentifiers = config.subscriptionIdentifierAvailable() ? (byte) 1 : 0; - this.sharedSubscriptions = config.sharedSubscriptionAvailable() ? 
(byte) 1 : 0; - this.session = config.sessionsAvailable(); this.topicAliasMaximumLimit = (short) Math.max(config.topicAliasMaximum(), 0); this.noLocal = config.noLocal(); this.sessionExpiryGracePeriod = config.sessionExpiryGracePeriod(); @@ -981,14 +956,17 @@ private int decodePublish( return progress; } - private boolean invalidUtf8(OctetsFW payload) + private boolean invalidUtf8( + OctetsFW payload) { boolean invalid = false; - byte[] payloadBytes = new byte[payload.sizeof()]; - payload.value().getBytes(0, payloadBytes); + byte[] payloadBytes = charsetBuffer.array(); + final int payloadSize = payload.sizeof(); + payload.value().getBytes(0, payloadBytes, 0, payloadSize); try { - utf8Decoder.decode(ByteBuffer.wrap(payloadBytes)); + charsetBuffer.position(0).limit(payloadSize); + utf8Decoder.decode(charsetBuffer); } catch (CharacterCodingException ex) { @@ -1010,6 +988,7 @@ private int decodeSubscribe( int progress = offset; + decode: if (length > 0) { int reasonCode = SUCCESS; @@ -1026,6 +1005,11 @@ else if ((subscribe.typeAndFlags() & 0b1111_1111) != SUBSCRIBE_FIXED_HEADER) if (reasonCode == 0) { + if (!MqttState.replyOpened(server.sessionStream.state)) + { + //We don't know the server capabilities yet + break decode; + } server.onDecodeSubscribe(traceId, authorization, subscribe); server.decoder = decodePacketType; progress = subscribe.limit(); @@ -1129,10 +1113,20 @@ private int decodeDisconnect( int progress = offset; + decode: if (length > 0) { int reasonCode = NORMAL_DISCONNECT; + + if (limit - offset == 2) + { + server.onDecodeDisconnect(traceId, authorization, null); + progress = limit; + server.decoder = decodePacketType; + break decode; + } + final MqttDisconnectFW disconnect = mqttDisconnectRO.tryWrap(buffer, offset, limit); if (disconnect == null) { @@ -1251,6 +1245,9 @@ private final class MqttServer private long keepAliveTimeoutId = NO_CANCEL_ID; private long keepAliveTimeoutAt; + private int maximumQos; + private int packetSizeMax; + private int 
capabilities = RETAIN_AVAILABLE_MASK | SUBSCRIPTION_IDS_AVAILABLE_MASK | WILDCARD_AVAILABLE_MASK; private boolean serverDefinedKeepAlive = false; private short keepAlive; private long keepAliveTimeout; @@ -1516,10 +1513,7 @@ private void onKeepAliveTimeoutSignal( final long now = System.currentTimeMillis(); if (now >= keepAliveTimeoutAt) { - if (session) - { - sessionStream.doSessionAbort(traceId); - } + sessionStream.doSessionAbort(traceId); onDecodeError(traceId, authorization, KEEP_ALIVE_TIMEOUT); decoder = decodeIgnoreAll; } @@ -1676,12 +1670,6 @@ else if (length > MAXIMUM_CLIENT_ID_LENGTH) keepAliveTimeout = Math.round(TimeUnit.SECONDS.toMillis(keepAlive) * 1.5); connectFlags = connect.flags(); doSignalKeepAliveTimeout(); - - if (session) - { - resolveSession(traceId, authorization, connectFlags); - } - doCancelConnectTimeout(); } @@ -1704,11 +1692,14 @@ private int onDecodeConnectPayload( int progress, int limit) { - byte reasonCode; + byte reasonCode = SUCCESS; decode: { final MqttConnectPayload payload = mqttConnectPayloadRO.reset(); int connectPayloadLimit = payload.decode(buffer, progress, limit, connectFlags); + + final boolean willFlagSet = isSetWillFlag(connectFlags); + reasonCode = payload.reasonCode; if (reasonCode != SUCCESS) @@ -1740,29 +1731,48 @@ else if (this.authField.equals(MqttConnectProperty.PASSWORD)) final MqttBindingConfig binding = bindings.get(routedId); - final MqttRouteConfig resolved = binding != null ? binding.resolve(sessionAuth) : null; + final MqttRouteConfig resolved = binding != null ? 
+ binding.resolveSession(sessionAuth, clientId.asString()) : null; if (resolved == null) { reasonCode = BAD_USER_NAME_OR_PASSWORD; break decode; } + else + { + resolveSession(traceId, resolved.id); + } + + if (willFlagSet && !MqttState.initialOpened(sessionStream.state)) + { + break decode; + } - this.sessionId = sessionAuth; - if (!session) + if (isSetWillRetain(connectFlags)) { - doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, - false, null); - connected = true; + if (!retainAvailable(capabilities)) + { + reasonCode = RETAIN_NOT_SUPPORTED; + break decode; + } + payload.willRetain = (byte) RETAIN_FLAG; } + if (payload.willQos > maximumQos) + { + reasonCode = QOS_NOT_SUPPORTED; + break decode; + } + + this.sessionId = sessionAuth; + final int flags = connectFlags; - final boolean willFlagSet = isSetWillFlag(flags); final int willFlags = decodeWillFlags(flags); final int willQos = decodeWillQos(flags); - if (session && willFlagSet) + if (willFlagSet) { final MqttDataExFW.Builder sessionDataExBuilder = mqttSessionDataExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) @@ -1807,7 +1817,7 @@ else if (this.authField.equals(MqttConnectProperty.PASSWORD)) doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, null); } - if (session) + if (sessionStream != null) { sessionStream.doSessionAppEnd(traceId, EMPTY_OCTETS); } @@ -1823,12 +1833,9 @@ else if (this.authField.equals(MqttConnectProperty.PASSWORD)) private void resolveSession( long traceId, - long authorization, - int flags) + long resolvedId) { - final MqttBindingConfig binding = bindings.get(routedId); - - final MqttRouteConfig resolved = binding != null ? 
binding.resolveSession(authorization, clientId.asString()) : null; + final int flags = connectFlags & (CLEAN_START_FLAG_MASK | WILL_FLAG_MASK); final MqttBeginExFW.Builder builder = mqttSessionBeginExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) .typeId(mqttTypeId) @@ -1840,7 +1847,7 @@ private void resolveSession( if (sessionStream == null) { - sessionStream = new MqttSessionStream(originId, resolved.id, 0); + sessionStream = new MqttSessionStream(originId, resolvedId, 0); } sessionStream.doSessionBegin(traceId, affinity, builder.build()); @@ -1880,31 +1887,49 @@ private void onDecodePublish( int reserved, OctetsFW payload) { - final int topicKey = topicKey(mqttPublishHeaderRO.topic); - MqttPublishStream stream = publishStreams.get(topicKey); - - final MqttDataExFW.Builder builder = mqttPublishDataExRW.wrap(dataExtBuffer, 0, dataExtBuffer.capacity()) - .typeId(mqttTypeId) - .publish(publishBuilder -> - { - publishBuilder.qos(mqttPublishHeaderRO.qos); - publishBuilder.flags(mqttPublishHeaderRO.flags); - publishBuilder.expiryInterval(mqttPublishHeaderRO.expiryInterval); - publishBuilder.contentType(mqttPublishHeaderRO.contentType); - publishBuilder.format(f -> f.set(mqttPublishHeaderRO.payloadFormat)); - publishBuilder.responseTopic(mqttPublishHeaderRO.responseTopic); - publishBuilder.correlation(c -> c.bytes(mqttPublishHeaderRO.correlationData)); - final Array32FW userProperties = userPropertiesRW.build(); - userProperties.forEach(c -> publishBuilder.propertiesItem(p -> p.key(c.key()).value(c.value()))); - }); - + int reasonCode = SUCCESS; + if (mqttPublishHeaderRO.qos > maximumQos) + { + reasonCode = QOS_NOT_SUPPORTED; + } + else if (mqttPublishHeaderRO.retained && !retainAvailable(capabilities)) + { + reasonCode = RETAIN_NOT_SUPPORTED; + } - final MqttDataExFW dataEx = builder.build(); - if (stream != null) + if (reasonCode != SUCCESS) { - stream.doPublishData(traceId, reserved, payload, dataEx); + onDecodeError(traceId, authorization, reasonCode); + 
decoder = decodeIgnoreAll; + } + else + { + final int topicKey = topicKey(mqttPublishHeaderRO.topic); + MqttPublishStream stream = publishStreams.get(topicKey); + + final MqttDataExFW.Builder builder = mqttPublishDataExRW.wrap(dataExtBuffer, 0, dataExtBuffer.capacity()) + .typeId(mqttTypeId) + .publish(p -> + { + p.qos(mqttPublishHeaderRO.qos) + .flags(mqttPublishHeaderRO.flags) + .expiryInterval(mqttPublishHeaderRO.expiryInterval) + .contentType(mqttPublishHeaderRO.contentType) + .format(f -> f.set(mqttPublishHeaderRO.payloadFormat)) + .responseTopic(mqttPublishHeaderRO.responseTopic) + .correlation(c -> c.bytes(mqttPublishHeaderRO.correlationData)); + final Array32FW userProperties = userPropertiesRW.build(); + userProperties.forEach(c -> p.propertiesItem(pi -> pi.key(c.key()).value(c.value()))); + }); + + + final MqttDataExFW dataEx = builder.build(); + if (stream != null) + { + stream.doPublishData(traceId, reserved, payload, dataEx); + } + doSignalKeepAliveTimeout(); } - doSignalKeepAliveTimeout(); } private void onDecodeSubscribe( @@ -1929,9 +1954,10 @@ private void onDecodeSubscribe( final int propertiesLimit = propertiesValue.limit(); MqttPropertyFW mqttProperty; - for (int progress = propertiesOffset; progress < propertiesLimit; progress = mqttProperty.limit()) + for (int propertiesProgress = propertiesOffset; + propertiesProgress < propertiesLimit; propertiesProgress = mqttProperty.limit()) { - mqttProperty = mqttPropertyRO.tryWrap(propertiesBuffer, progress, propertiesLimit); + mqttProperty = mqttPropertyRO.tryWrap(propertiesBuffer, propertiesProgress, propertiesLimit); switch (mqttProperty.kind()) { case KIND_SUBSCRIPTION_ID: @@ -1950,73 +1976,75 @@ private void onDecodeSubscribe( { final List newSubscriptions = new ArrayList<>(); - for (int decodeProgress = decodeOffset; decodeProgress < decodeLimit; ) + decode: { - final MqttSubscribePayloadFW mqttSubscribePayload = - mqttSubscribePayloadRO.tryWrap(decodeBuffer, decodeProgress, decodeLimit); - if 
(mqttSubscribePayload == null) - { - break; - } - decodeProgress = mqttSubscribePayload.limit(); - final String filter = mqttSubscribePayload.filter().asString(); - if (filter == null) + for (int decodeProgress = decodeOffset; decodeProgress < decodeLimit; ) { - onDecodeError(traceId, authorization, PROTOCOL_ERROR); - decoder = decodeIgnoreAll; - break; - } + final MqttSubscribePayloadFW mqttSubscribePayload = + mqttSubscribePayloadRO.tryWrap(decodeBuffer, decodeProgress, decodeLimit); + if (mqttSubscribePayload == null) + { + break; + } + decodeProgress = mqttSubscribePayload.limit(); - final boolean validTopicFilter = validator.isTopicFilterValid(filter); - if (!validTopicFilter) - { - onDecodeError(traceId, authorization, PROTOCOL_ERROR); - decoder = decodeIgnoreAll; - break; - } - if (wildcardSubscriptions == 0 && (filter.contains("+") || filter.contains("#"))) - { - onDecodeError(traceId, authorization, WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED); - decoder = decodeIgnoreAll; - break; - } + final String filter = mqttSubscribePayload.filter().asString(); + if (filter == null) + { + onDecodeError(traceId, authorization, PROTOCOL_ERROR); + decoder = decodeIgnoreAll; + break; + } - if (sharedSubscriptions == 0 && filter.contains(SHARED_SUBSCRIPTION_LITERAL)) - { - onDecodeError(traceId, authorization, SHARED_SUBSCRIPTION_NOT_SUPPORTED); - decoder = decodeIgnoreAll; - break; - } + final boolean validTopicFilter = validator.isTopicFilterValid(filter); + if (!validTopicFilter) + { + onDecodeError(traceId, authorization, PROTOCOL_ERROR); + decoder = decodeIgnoreAll; + break; + } + if (!wildcardAvailable(capabilities) && (filter.contains("+") || filter.contains("#"))) + { + onDecodeError(traceId, authorization, WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED); + decoder = decodeIgnoreAll; + break; + } - if (subscriptionIdentifiers == 0 && containsSubscriptionId) - { - onDecodeError(traceId, authorization, SUBSCRIPTION_IDS_NOT_SUPPORTED); - decoder = decodeIgnoreAll; - break; - } + if 
(!sharedSubscriptionAvailable(capabilities) && filter.contains(SHARED_SUBSCRIPTION_LITERAL)) + { + onDecodeError(traceId, authorization, SHARED_SUBSCRIPTION_NOT_SUPPORTED); + decoder = decodeIgnoreAll; + break; + } + + if (!subscriptionIdsAvailable(capabilities) && containsSubscriptionId) + { + onDecodeError(traceId, authorization, SUBSCRIPTION_IDS_NOT_SUPPORTED); + decoder = decodeIgnoreAll; + break; + } - final int options = mqttSubscribePayload.options(); - final int flags = calculateSubscribeFlags(traceId, authorization, options); + final int options = mqttSubscribePayload.options(); + final int flags = calculateSubscribeFlags(traceId, authorization, options); - if (!noLocal && isSetNoLocal(flags)) - { - onDecodeError(traceId, authorization, PROTOCOL_ERROR); - decoder = decodeIgnoreAll; - break; - } + if (!noLocal && isSetNoLocal(flags)) + { + onDecodeError(traceId, authorization, PROTOCOL_ERROR); + decoder = decodeIgnoreAll; + break; + } - Subscription subscription = new Subscription(); - subscription.id = subscriptionId; - subscription.filter = filter; - subscription.flags = flags; - //TODO: what if we don't have a subscriptionId - subscribePacketIds.put(subscriptionId, packetId); - newSubscriptions.add(subscription); - } + Subscription subscription = new Subscription(); + subscription.id = subscriptionId; + subscription.filter = filter; + subscription.flags = flags; + //TODO: what if we don't have a subscriptionId + subscribePacketIds.put(subscriptionId, packetId); + + newSubscriptions.add(subscription); + } - if (session) - { final MqttDataExFW.Builder sessionDataExBuilder = mqttSessionDataExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity()) .typeId(mqttTypeId) @@ -2045,12 +2073,12 @@ private void onDecodeSubscribe( final MqttSessionStateFW sessionState = state.build(); final int payloadSize = sessionState.sizeof(); + if (!sessionStream.hasSessionWindow(payloadSize)) + { + break decode; + } sessionStream.doSessionData(traceId, payloadSize, 
sessionDataExBuilder.build(), sessionState); } - else - { - openSubscribeStreams(packetId, traceId, authorization, newSubscriptions, false); - } } doSignalKeepAliveTimeout(); } @@ -2060,10 +2088,21 @@ private void openSubscribeStreams( long traceId, long authorization, List subscriptions, - boolean adminSubscribe) + boolean implicitSubscribe) { final Long2ObjectHashMap> subscriptionsByRouteId = new Long2ObjectHashMap<>(); + if (!implicitSubscribe) + { + final byte[] subscriptionPayload = new byte[subscriptions.size()]; + for (int i = 0; i < subscriptionPayload.length; i++) + { + subscriptionPayload[i] = (byte) subscriptions.get(i).reasonCode; + } + + doEncodeSuback(traceId, sessionId, packetId, subscriptionPayload); + } + for (Subscription subscription : subscriptions) { final MqttBindingConfig binding = bindings.get(routedId); @@ -2081,8 +2120,9 @@ private void openSubscribeStreams( { int subscribeKey = subscribeKey(clientId.asString(), key); MqttSubscribeStream stream = subscribeStreams.computeIfAbsent(subscribeKey, s -> - new MqttSubscribeStream(routedId, key, adminSubscribe)); + new MqttSubscribeStream(routedId, key, implicitSubscribe)); stream.packetId = packetId; + value.removeIf(s -> s.reasonCode > GRANTED_QOS_2); stream.doSubscribeBeginOrFlush(traceId, affinity, subscribeKey, value); }); } @@ -2129,23 +2169,25 @@ private void onDecodeUnsubscribe( } else { - if (session) - { - List unAckedSubscriptions = sessionStream.unAckedSubscriptions.stream() - .filter(s -> topicFilters.contains(s.filter) && subscribePacketIds.containsKey(s.id)) - .collect(Collectors.toList()); + List unAckedSubscriptions = sessionStream.unAckedSubscriptions.stream() + .filter(s -> topicFilters.contains(s.filter) && subscribePacketIds.containsKey(s.id)) + .collect(Collectors.toList()); - if (!unAckedSubscriptions.isEmpty()) - { - sessionStream.deferredUnsubscribes.put(packetId, topicFilters); - return; - } + if (!unAckedSubscriptions.isEmpty()) + { + 
sessionStream.deferredUnsubscribes.put(packetId, topicFilters); + return; + } + boolean matchingSubscription = topicFilters.stream().anyMatch(tf -> + sessionStream.subscriptions.stream().anyMatch(s -> s.filter.equals(tf))); + if (matchingSubscription) + { topicFilters.forEach(filter -> unsubscribePacketIds.put(filter, packetId)); doSendSessionState(traceId, topicFilters); } else { - sendUnsuback(packetId, traceId, authorization, topicFilters, false); + sendUnsuback(packetId, traceId, authorization, topicFilters, null, false); } doSignalKeepAliveTimeout(); } @@ -2169,12 +2211,11 @@ private void doSendSessionState( mqttSessionStateFW.wrap(sessionStateBuffer, 0, sessionStateBuffer.capacity()); newState.forEach(subscription -> - sessionStateBuilder.subscriptionsItem(subscriptionBuilder -> - { - subscriptionBuilder.pattern(subscription.filter); - subscriptionBuilder.subscriptionId(subscription.id); - subscriptionBuilder.flags(subscription.flags); - }) + sessionStateBuilder.subscriptionsItem(s -> + s.subscriptionId(subscription.id) + .qos(subscription.qos) + .flags(subscription.flags) + .pattern(subscription.filter)) ); final MqttSessionStateFW sessionState = sessionStateBuilder.build(); @@ -2188,6 +2229,7 @@ private void sendUnsuback( long traceId, long authorization, List topicFilters, + List newState, boolean adminUnsubscribe) { final MutableDirectBuffer encodeBuffer = payloadBuffer; @@ -2196,7 +2238,7 @@ private void sendUnsuback( int encodeProgress = encodeOffset; - final Map> filtersByStream = new HashMap<>(); + final Map> filtersByStream = new HashMap<>(); for (String topicFilter : topicFilters) { @@ -2206,11 +2248,11 @@ private void sendUnsuback( final int subscribeKey = subscribeKey(clientId.asString(), resolved.id); final MqttSubscribeStream stream = subscribeStreams.get(subscribeKey); - filtersByStream.computeIfAbsent(stream, s -> new ArrayList<>()).add(topicFilter); - Optional subscription = stream.getSubscriptionByFilter(topicFilter); + Optional subscription 
= stream.getSubscriptionByFilter(topicFilter, newState); - int encodeReasonCode = subscription.isPresent() ? SUCCESS : NO_SUBSCRIPTION_EXISTED; + subscription.ifPresent(value -> filtersByStream.computeIfAbsent(stream, s -> new ArrayList<>()).add(value)); + int encodeReasonCode = subscription.isPresent() ? subscription.get().reasonCode : NO_SUBSCRIPTION_EXISTED; final MqttUnsubackPayloadFW mqttUnsubackPayload = mqttUnsubackPayloadRW.wrap(encodeBuffer, encodeProgress, encodeLimit) .reasonCode(encodeReasonCode) @@ -2219,7 +2261,7 @@ private void sendUnsuback( } filtersByStream.forEach( - (stream, filters) -> stream.doSubscribeFlushOrEnd(traceId, filters)); + (stream, subscriptions) -> stream.doSubscribeFlushOrEnd(traceId, subscriptions)); if (!adminUnsubscribe) { final OctetsFW encodePayload = octetsRO.wrap(encodeBuffer, encodeOffset, encodeProgress); @@ -2241,7 +2283,7 @@ private void onDecodeDisconnect( long authorization, MqttDisconnectFW disconnect) { - byte reasonCode = decodeDisconnectProperties(disconnect.properties()); + byte reasonCode = disconnect != null ? 
decodeDisconnectProperties(disconnect.properties()) : SUCCESS; if (reasonCode != SUCCESS) { @@ -2250,16 +2292,13 @@ private void onDecodeDisconnect( } else { - if (session) + if (disconnect != null && disconnect.reasonCode() == DISCONNECT_WITH_WILL_MESSAGE) { - if (disconnect.reasonCode() == DISCONNECT_WITH_WILL_MESSAGE) - { - sessionStream.doSessionAbort(traceId); - } - else - { - sessionStream.doSessionAppEnd(traceId, EMPTY_OCTETS); - } + sessionStream.doSessionAbort(traceId); + } + else + { + sessionStream.doSessionAppEnd(traceId, EMPTY_OCTETS); } } @@ -2467,6 +2506,7 @@ private void doEncodePublish( final int expiryInterval = subscribeDataEx.subscribe().expiryInterval(); final String16FW contentType = subscribeDataEx.subscribe().contentType(); final String16FW responseTopic = subscribeDataEx.subscribe().responseTopic(); + final MqttPayloadFormatFW format = subscribeDataEx.subscribe().format(); final MqttBinaryFW correlation = subscribeDataEx.subscribe().correlation(); final Array32FW subscriptionIds = subscribeDataEx.subscribe().subscriptionIds(); final Array32FW properties = @@ -2517,11 +2557,13 @@ private void doEncodePublish( propertiesSize.set(mqttPropertyRW.limit()); } - // TODO: optional format - mqttPropertyRW.wrap(propertyBuffer, propertiesSize.get(), propertyBuffer.capacity()) - .payloadFormat((byte) subscribeDataEx.subscribe().format().get().ordinal()) - .build(); - propertiesSize.set(mqttPropertyRW.limit()); + if (!format.get().equals(MqttPayloadFormat.NONE)) + { + mqttPropertyRW.wrap(propertyBuffer, propertiesSize.get(), propertyBuffer.capacity()) + .payloadFormat((byte) subscribeDataEx.subscribe().format().get().ordinal()) + .build(); + propertiesSize.set(mqttPropertyRW.limit()); + } if (responseTopic.value() != null) { @@ -2616,15 +2658,15 @@ private void doEncodeConnack( if (0 <= maximumQos && maximumQos < 2) { mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity()) - .maximumQoS(maximumQos) + 
.maximumQoS((byte) maximumQos) .build(); propertiesSize = mqttProperty.limit(); } - if (retainedMessages == 0) + if (!retainAvailable(capabilities)) { mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity()) - .retainAvailable(retainedMessages) + .retainAvailable((byte) 0) .build(); propertiesSize = mqttProperty.limit(); } @@ -2637,26 +2679,26 @@ private void doEncodeConnack( propertiesSize = mqttProperty.limit(); } - if (wildcardSubscriptions == 0) + if (!subscriptionIdsAvailable(capabilities)) { mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity()) - .wildcardSubscriptionAvailable(wildcardSubscriptions) + .subscriptionIdsAvailable((byte) 0) .build(); propertiesSize = mqttProperty.limit(); } - if (subscriptionIdentifiers == 0) + if (!sharedSubscriptionAvailable(capabilities)) { mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity()) - .subscriptionIdsAvailable(subscriptionIdentifiers) + .sharedSubscriptionAvailable((byte) 0) .build(); propertiesSize = mqttProperty.limit(); } - if (sharedSubscriptions == 0) + if (!wildcardAvailable(capabilities)) { mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity()) - .sharedSubscriptionAvailable(sharedSubscriptions) + .wildcardSubscriptionAvailable((byte) 0) .build(); propertiesSize = mqttProperty.limit(); } @@ -3037,6 +3079,7 @@ private final class Subscription private String filter; private int qos; private int flags; + private int reasonCode; private boolean retainAsPublished() { @@ -3056,13 +3099,13 @@ public boolean equals(Object obj) } Subscription other = (Subscription) obj; return this.id == other.id && Objects.equals(this.filter, other.filter) && - this.qos == other.qos && this.flags == other.flags; + this.qos == other.qos && this.flags == other.flags && this.reasonCode == other.reasonCode; } @Override public int hashCode() { - return Objects.hash(this.id, 
this.filter, this.qos, this.flags); + return Objects.hash(this.id, this.filter, this.qos, this.flags, this.reasonCode); } } @@ -3144,10 +3187,6 @@ private void onSession( final ResetFW reset = resetRO.wrap(buffer, index, index + length); onSessionReset(reset); break; - case SignalFW.TYPE_ID: - final SignalFW signal = signalRO.wrap(buffer, index, index + length); - onSessionSignal(signal); - break; } } @@ -3213,15 +3252,19 @@ private void onSessionReset( if (mqttResetEx != null) { String16FW serverRef = mqttResetEx.serverRef(); - boolean serverRefExists = serverRef != null; + byte reasonCode = (byte) mqttResetEx.reasonCode(); + boolean serverRefExists = serverRef != null && serverRef.asString() != null; - byte reasonCode = serverRefExists ? SERVER_MOVED : SESSION_TAKEN_OVER; + if (reasonCode == SUCCESS) + { + reasonCode = serverRefExists ? SERVER_MOVED : SESSION_TAKEN_OVER; + } if (!connected) { doCancelConnectTimeout(); doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, - false, serverRef); + false, serverRefExists ? 
serverRef : null); } else { @@ -3229,23 +3272,10 @@ private void onSessionReset( } } setInitialClosed(); - decodeNetwork(traceId); cleanupAbort(traceId); } - private void onSessionSignal( - SignalFW signal) - { - final int signalId = signal.signalId(); - - switch (signalId) - { - default: - break; - } - } - private void onSessionBegin( BeginFW begin) { @@ -3262,6 +3292,9 @@ private void onSessionBegin( final MqttSessionBeginExFW mqttSessionBeginEx = mqttBeginEx.session(); sessionExpiry = mqttSessionBeginEx.expiry(); + capabilities = mqttSessionBeginEx.capabilities(); + maximumQos = mqttSessionBeginEx.qosMax(); + maximumPacketSize = (int) mqttSessionBeginEx.packetSizeMax(); } doSessionWindow(traceId, encodeSlotOffset, encodeBudgetMax); @@ -3322,6 +3355,8 @@ private void onSessionData( subscription.flags = filter.flags(); subscriptions.add(subscription); }); + int packetId = subscribePacketIds.get(subscriptions.get(0).id); + subscriptions.forEach(sub -> subscribePacketIds.remove(sub.id)); openSubscribeStreams(packetId, traceId, authorization, subscriptions, true); sessionPresent = true; } @@ -3340,6 +3375,7 @@ private void onSessionData( subscription.id = (int) filter.subscriptionId(); subscription.filter = filter.pattern().asString(); subscription.flags = filter.flags(); + subscription.reasonCode = filter.reasonCode(); newState.add(subscription); }); List currentSubscriptions = sessionStream.subscriptions(); @@ -3362,25 +3398,25 @@ private void onSessionData( } else { - List removedFilters = currentSubscriptions.stream() + final List unsubscribedFilters = currentSubscriptions.stream() .filter(s -> !newState.contains(s)) .map(s -> s.filter) .collect(Collectors.toList()); - if (!removedFilters.isEmpty()) + if (!unsubscribedFilters.isEmpty()) { - Map> packetIdToFilters = removedFilters.stream() + Map> packetIdToFilters = unsubscribedFilters.stream() .filter(unsubscribePacketIds::containsKey) .collect(Collectors.groupingBy(unsubscribePacketIds::remove, 
Collectors.toList())); if (!packetIdToFilters.isEmpty()) { packetIdToFilters.forEach((unsubscribePacketId, filters) -> - sendUnsuback(unsubscribePacketId, traceId, authorization, filters, false)); + sendUnsuback(unsubscribePacketId, traceId, authorization, filters, newState, false)); } else { - sendUnsuback(packetId, traceId, authorization, removedFilters, true); + sendUnsuback(packetId, traceId, authorization, unsubscribedFilters, newState, true); } } } @@ -3615,12 +3651,10 @@ private void doPublishBegin( final MqttBeginExFW beginEx = mqttPublishBeginExRW.wrap(extBuffer, 0, extBuffer.capacity()) .typeId(mqttTypeId) - .publish(publishBuilder -> - { - publishBuilder.clientId(clientId); - publishBuilder.topic(topic); - publishBuilder.flags(retainedMessages); - }) + .publish(p -> + p.clientId(clientId) + .topic(topic) + .flags(retainAvailable(capabilities) ? 1 : 0)) .build(); application = newStream(this::onPublish, originId, routedId, initialId, initialSeq, initialAck, initialMax, @@ -4000,9 +4034,14 @@ private class MqttSubscribeStream this.adminSubscribe = adminSubscribe; } - private Optional getSubscriptionByFilter(String filter) + private Optional getSubscriptionByFilter( + String filter, + List newState) { - return subscriptions.stream().filter(s -> s.filter.equals(filter)).findFirst(); + return Optional.ofNullable(newState) + .flatMap(list -> list.stream().filter(s -> s.filter.equals(filter)).findFirst()) + .or(() -> subscriptions.stream().filter(s -> s.filter.equals(filter)).findFirst()); + } private void doSubscribeBeginOrFlush( @@ -4020,7 +4059,7 @@ private void doSubscribeBeginOrFlush( } else { - doSubscribeFlush(traceId, 0, subscriptions); + doSubscribeFlush(traceId, 0); } } @@ -4055,8 +4094,7 @@ private void doSubscribeBegin( private void doSubscribeFlush( long traceId, - int reserved, - List newSubscriptions) + int reserved) { doFlush(application, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, sessionId, 0L, reserved, @@ 
-4074,35 +4112,28 @@ private void doSubscribeFlush( .build() .sizeof())); - if (newSubscriptions != null && !newSubscriptions.isEmpty()) - { - // TODO: do we get back anything after we send a flush? - // Should we say it's a success right after we sent the flush? - final byte[] subscriptionPayload = new byte[newSubscriptions.size()]; - for (int i = 0; i < subscriptionPayload.length; i++) - { - subscriptionPayload[i] = SUCCESS; - } - - doEncodeSuback(traceId, sessionId, packetId, subscriptionPayload); - } - initialSeq += reserved; assert initialSeq <= initialAck + initialMax; } private void doSubscribeFlushOrEnd( long traceId, - List unsubscribedPatterns) + List unsubscribed) { - this.subscriptions.removeIf(subscription -> unsubscribedPatterns.contains(subscription.filter)); + for (Subscription subscription : unsubscribed) + { + if (subscription.reasonCode == SUCCESS) + { + this.subscriptions.remove(subscription); + } + } if (!MqttState.initialOpened(state)) { state = MqttState.closingInitial(state); } else { - doSubscribeFlush(traceId, 0, null); + doSubscribeFlush(traceId, 0); } } @@ -4263,19 +4294,7 @@ private void onSubscribeWindow( if (!subscriptions.isEmpty() && !adminSubscribe) { - final byte[] subscriptionPayload = new byte[subscriptions.size()]; - //TODO: if we get back the window, can it be anything else than success? 
I think yes, and we only need to - // recognize reject scenarios when doing the decodeSubscribe - for (int i = 0; i < subscriptionPayload.length; i++) - { - subscriptionPayload[i] = SUCCESS; - } - - if (!MqttState.initialOpened(state)) - { - doEncodeSuback(traceId, authorization, packetId, subscriptionPayload); - } - if (session && !sessionStream.deferredUnsubscribes.isEmpty()) + if (!sessionStream.deferredUnsubscribes.isEmpty()) { Iterator>> iterator = sessionStream.deferredUnsubscribes.entrySet().iterator(); @@ -4419,6 +4438,30 @@ private static boolean invalidWillQos( return (flags & WILL_QOS_MASK) == WILL_QOS_MASK; } + private static boolean retainAvailable( + int capabilities) + { + return (capabilities & RETAIN_AVAILABLE_MASK) != 0; + } + + private static boolean wildcardAvailable( + int capabilities) + { + return (capabilities & WILDCARD_AVAILABLE_MASK) != 0; + } + + private static boolean subscriptionIdsAvailable( + int capabilities) + { + return (capabilities & SUBSCRIPTION_IDS_AVAILABLE_MASK) != 0; + } + + private static boolean sharedSubscriptionAvailable( + int capabilities) + { + return (capabilities & SHARED_SUBSCRIPTIONS_AVAILABLE_MASK) != 0; + } + private static boolean isSetWillQos( int flags) { @@ -4610,21 +4653,11 @@ private int decode( break decode; } final byte qos = (byte) ((flags & WILL_QOS_MASK) >>> 3); - if (qos != 0 && qos <= maximumQos) + if (qos != 0) { willQos = (byte) (qos << 1); } - if (isSetWillRetain(flags)) - { - if (retainedMessages == 0) - { - reasonCode = RETAIN_NOT_SUPPORTED; - break decode; - } - willRetain = (byte) RETAIN_FLAG; - } - willProperties = mqttWill.properties(); decode(willProperties); @@ -4782,90 +4815,80 @@ private int decode( qos = calculatePublishApplicationQos(typeAndFlags); int alias = 0; - if (qos > maximumQos) - { - reasonCode = QOS_NOT_SUPPORTED; - } - else if (retained && retainedMessages == 0) - { - reasonCode = RETAIN_NOT_SUPPORTED; - } - else + + decode: + for (int decodeProgress = decodeOffset; 
decodeProgress < decodeLimit; ) { - decode: - for (int decodeProgress = decodeOffset; decodeProgress < decodeLimit; ) + final MqttPropertyFW mqttProperty = mqttPropertyRO.wrap(decodeBuffer, decodeProgress, decodeLimit); + switch (mqttProperty.kind()) { - final MqttPropertyFW mqttProperty = mqttPropertyRO.wrap(decodeBuffer, decodeProgress, decodeLimit); - switch (mqttProperty.kind()) + case KIND_EXPIRY_INTERVAL: + expiryInterval = mqttProperty.expiryInterval(); + break; + case KIND_CONTENT_TYPE: + final String16FW mContentType = mqttProperty.contentType(); + if (mContentType.value() != null) { - case KIND_EXPIRY_INTERVAL: - expiryInterval = mqttProperty.expiryInterval(); - break; - case KIND_CONTENT_TYPE: - final String16FW mContentType = mqttProperty.contentType(); - if (mContentType.value() != null) - { - final int offset = mContentType.offset(); - final int limit = mContentType.limit(); + final int offset = mContentType.offset(); + final int limit = mContentType.limit(); - contentType = contentTypeRO.wrap(mContentType.buffer(), offset, limit); - } - break; - case KIND_TOPIC_ALIAS: - if (alias != 0) - { - reasonCode = PROTOCOL_ERROR; - break decode; - } + contentType = contentTypeRO.wrap(mContentType.buffer(), offset, limit); + } + break; + case KIND_TOPIC_ALIAS: + if (alias != 0) + { + reasonCode = PROTOCOL_ERROR; + break decode; + } - alias = mqttProperty.topicAlias() & 0xFFFF; + alias = mqttProperty.topicAlias() & 0xFFFF; - if (alias <= 0 || alias > server.topicAliasMaximum) - { - reasonCode = TOPIC_ALIAS_INVALID; - break decode; - } + if (alias <= 0 || alias > server.topicAliasMaximum) + { + reasonCode = TOPIC_ALIAS_INVALID; + break decode; + } - if (topic.isEmpty()) - { - if (!server.topicAliases.containsKey(alias)) - { - reasonCode = PROTOCOL_ERROR; - break decode; - } - topic = server.topicAliases.get(alias); - } - else + if (topic.isEmpty()) + { + if (!server.topicAliases.containsKey(alias)) { - server.topicAliases.put(alias, topic); + reasonCode = 
PROTOCOL_ERROR; + break decode; } - break; - case KIND_PAYLOAD_FORMAT: - payloadFormat = MqttPayloadFormat.valueOf(mqttProperty.payloadFormat()); - break; - case KIND_RESPONSE_TOPIC: - final String16FW mResponseTopic = mqttProperty.responseTopic(); - if (mResponseTopic.value() != null) - { - final int offset = mResponseTopic.offset(); - final int limit = mResponseTopic.limit(); + topic = server.topicAliases.get(alias); + } + else + { + server.topicAliases.put(alias, topic); + } + break; + case KIND_PAYLOAD_FORMAT: + payloadFormat = MqttPayloadFormat.valueOf(mqttProperty.payloadFormat()); + break; + case KIND_RESPONSE_TOPIC: + final String16FW mResponseTopic = mqttProperty.responseTopic(); + if (mResponseTopic.value() != null) + { + final int offset = mResponseTopic.offset(); + final int limit = mResponseTopic.limit(); - responseTopic = responseTopicRO.wrap(mResponseTopic.buffer(), offset, limit); - } - break; - case KIND_CORRELATION_DATA: - correlationData = mqttProperty.correlationData().bytes(); - break; - case KIND_USER_PROPERTY: - final MqttUserPropertyFW userProperty = mqttProperty.userProperty(); - userPropertiesRW.item(c -> c.key(userProperty.key()).value(userProperty.value())); - break; - default: - reasonCode = MALFORMED_PACKET; - break decode; + responseTopic = responseTopicRO.wrap(mResponseTopic.buffer(), offset, limit); } - decodeProgress = mqttProperty.limit(); + break; + case KIND_CORRELATION_DATA: + correlationData = mqttProperty.correlationData().bytes(); + break; + case KIND_USER_PROPERTY: + final MqttUserPropertyFW userProperty = mqttProperty.userProperty(); + userPropertiesRW.item(c -> c.key(userProperty.key()).value(userProperty.value())); + break; + default: + reasonCode = MALFORMED_PACKET; + break decode; } + decodeProgress = mqttProperty.limit(); } } diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java 
b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java index 3a3859e683..f31a8246d9 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java @@ -24,12 +24,11 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.RETAIN_AVAILABLE; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SERVER_REFERENCE; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SESSIONS_AVAILABLE; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SESSION_EXPIRY_GRACE_PERIOD; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SHARED_SUBSCRIPTION_AVAILABLE; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SUBSCRIPTION_IDENTIFIERS_AVAILABLE; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SHARED_SUBSCRIPTION; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SUBSCRIPTION_IDENTIFIERS; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.TOPIC_ALIAS_MAXIMUM; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.WILDCARD_SUBSCRIPTION_AVAILABLE; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.WILDCARD_SUBSCRIPTION; import static org.junit.Assert.assertEquals; import org.junit.Test; @@ -46,14 +45,13 @@ public class MqttConfigurationTest public static final String WILDCARD_SUBSCRIPTION_AVAILABLE_NAME = "zilla.binding.mqtt.wildcard.subscription.available"; public static final String SUBSCRIPTION_IDENTIFIERS_AVAILABLE_NAME = 
"zilla.binding.mqtt.subscription.identifiers.available"; public static final String SHARED_SUBSCRIPTION_AVAILABLE_NAME = "zilla.binding.mqtt.shared.subscription.available"; - public static final String SESSION_AVAILABLE_NAME = "zilla.binding.mqtt.sessions.available"; public static final String NO_LOCAL_NAME = "zilla.binding.mqtt.no.local"; public static final String SESSION_EXPIRY_GRACE_PERIOD_NAME = "zilla.binding.mqtt.session.expiry.grace.period"; public static final String CLIENT_ID_NAME = "zilla.binding.mqtt.client.id"; public static final String SERVER_REFERENCE_NAME = "zilla.binding.mqtt.server.reference"; @Test - public void shouldVerifyConstants() throws Exception + public void shouldVerifyConstants() { assertEquals(PUBLISH_TIMEOUT.name(), PUBLISH_TIMEOUT_NAME); assertEquals(CONNECT_TIMEOUT.name(), CONNECT_TIMEOUT_NAME); @@ -62,10 +60,9 @@ public void shouldVerifyConstants() throws Exception assertEquals(MAXIMUM_QOS.name(), MAXIMUM_QOS_NAME); assertEquals(RETAIN_AVAILABLE.name(), RETAIN_AVAILABLE_NAME); assertEquals(TOPIC_ALIAS_MAXIMUM.name(), TOPIC_ALIAS_MAXIMUM_NAME); - assertEquals(WILDCARD_SUBSCRIPTION_AVAILABLE.name(), WILDCARD_SUBSCRIPTION_AVAILABLE_NAME); - assertEquals(SUBSCRIPTION_IDENTIFIERS_AVAILABLE.name(), SUBSCRIPTION_IDENTIFIERS_AVAILABLE_NAME); - assertEquals(SHARED_SUBSCRIPTION_AVAILABLE.name(), SHARED_SUBSCRIPTION_AVAILABLE_NAME); - assertEquals(SESSIONS_AVAILABLE.name(), SESSION_AVAILABLE_NAME); + assertEquals(WILDCARD_SUBSCRIPTION.name(), WILDCARD_SUBSCRIPTION_AVAILABLE_NAME); + assertEquals(SUBSCRIPTION_IDENTIFIERS.name(), SUBSCRIPTION_IDENTIFIERS_AVAILABLE_NAME); + assertEquals(SHARED_SUBSCRIPTION.name(), SHARED_SUBSCRIPTION_AVAILABLE_NAME); assertEquals(NO_LOCAL.name(), NO_LOCAL_NAME); assertEquals(SESSION_EXPIRY_GRACE_PERIOD.name(), SESSION_EXPIRY_GRACE_PERIOD_NAME); assertEquals(CLIENT_ID.name(), CLIENT_ID_NAME); diff --git 
a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java new file mode 100644 index 0000000000..3ebb5051af --- /dev/null +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java @@ -0,0 +1,115 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.mqtt.internal.stream.client; + +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class ConnectionIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/mqtt/streams/network") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/mqtt/streams/application"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configure(PUBLISH_TIMEOUT, 1L) + .configure(ENGINE_DRAIN_ON_CLOSE, false) + .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/client.sent.abort/server", + "${app}/client.sent.abort/client"}) + public void shouldReceiveClientSentAbort() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/session.will.message.retain/server", + "${app}/session.will.message.retain/client"}) + public void shouldConnectWithWillMessageRetain() throws Exception + 
{ + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/connect.non.successful.connack/server", + "${app}/connect.non.successful.connack/client"}) + public void shouldResetWithReasonCodeOnNonSuccessfulConnack() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/connect.non.successful.disconnect/server", + "${app}/connect.non.successful.disconnect/client"}) + public void shouldResetWithReasonCodeOnNonSuccessfulDisconnect() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/disconnect.after.subscribe.and.publish/server", + "${app}/disconnect.after.subscribe.and.publish/client"}) + public void shouldDisconnectAfterSubscribeAndPublish() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/connect.delegate.connack.properties/server", + "${app}/connect.delegate.connack.properties/client"}) + public void shouldDelegateConnackProperties() throws Exception + { + k3po.finish(); + } +} diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java new file mode 100644 index 0000000000..32a0b402cf --- /dev/null +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java @@ -0,0 +1,79 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.internal.stream.client; + +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class PingIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/mqtt/streams/network") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/mqtt/streams/application"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configure(PUBLISH_TIMEOUT, 1L) + .configure(ENGINE_DRAIN_ON_CLOSE, false) + .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + @Test + 
@Configuration("client.yaml") + @Specification({ + "${net}/ping.server.override.keep.alive/server", + "${app}/session.connect/client"}) + public void shouldConnectThenPingRequestResponse() throws Exception + { + k3po.start(); + Thread.sleep(2500); + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/ping.no.pingresp/server", + "${app}/session.connect.abort/client"}) + public void shouldCloseWhenPingRequestNoResponseInTimeout() throws Exception + { + k3po.start(); + Thread.sleep(2000); + k3po.finish(); + } +} diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java new file mode 100644 index 0000000000..7521710e67 --- /dev/null +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java @@ -0,0 +1,137 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.mqtt.internal.stream.client; + +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class PublishIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/mqtt/streams/network") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/mqtt/streams/application"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configure(PUBLISH_TIMEOUT, 1L) + .configure(ENGINE_DRAIN_ON_CLOSE, false) + .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + + //TODO: adapt this once we decided + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/publish.empty.message/server", + "${app}/publish.empty.message/client"}) + public void shouldSendEmptyMessage() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/publish.empty.retained.message/server", + "${app}/publish.empty.retained.message/client"}) + public void 
shouldSendEmptyRetainedMessage() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/publish.multiple.messages/server", + "${app}/publish.multiple.messages/client"}) + public void shouldSendMultipleMessages() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/publish.one.message/server", + "${app}/publish.one.message/client"}) + public void shouldSendOneMessage() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/publish.retained/server", + "${app}/publish.retained/client"}) + public void shouldPublishRetainedMessage() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/publish.with.user.properties.distinct/server", + "${app}/publish.with.user.properties.distinct/client"}) + public void shouldSendWithDistinctUserProperties() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/publish.with.user.properties.repeated/server", + "${app}/publish.with.user.properties.repeated/client"}) + public void shouldSendWithRepeatedUserProperties() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/publish.with.user.property/server", + "${app}/publish.with.user.property/client"}) + public void shouldSendWithUserProperty() throws Exception + { + k3po.finish(); + } +} diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java new file mode 100644 index 0000000000..585ec9a9a1 --- /dev/null +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java @@ -0,0 +1,315 @@ +/* + * 
Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.internal.stream.client; + +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class SubscribeIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/mqtt/streams/network") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/mqtt/streams/application"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configure(PUBLISH_TIMEOUT, 1L) + .configure(ENGINE_DRAIN_ON_CLOSE, false) + 
.configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/session.subscribe/server", + "${app}/session.subscribe/client"}) + public void shouldSubscribe() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.one.message/server", + "${app}/subscribe.one.message/client"}) + public void shouldReceiveOneMessage() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.retain.as.published/server", + "${app}/subscribe.retain.as.published/client"}) + public void shouldReceiveRetainAsPublished() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.one.message.receive.response.topic.and.correlation.data/server", + "${app}/subscribe.one.message.receive.response.topic.and.correlation.data/client"}) + public void shouldReceiveCorrelationData() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.one.message.user.properties.unaltered/server", + "${app}/subscribe.one.message.user.properties.unaltered/client"}) + public void shouldReceiveOneMessageWithUserPropertiesUnaltered() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.publish.no.local/server", + "${app}/subscribe.publish.no.local/client"}) + public void shouldNotReceivePublishLocal() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.qos0.publish.retained.no.replay/server", + "${app}/subscribe.qos0.publish.retained.no.replay/client"}) + public void shouldNotReplayRetained() throws Exception + { + k3po.finish(); 
+ } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.qos0.replay.retained.no.packet.id/server", + "${app}/subscribe.qos0.replay.retained.no.packet.id/client"}) + public void shouldReceiveAndReplayRetainedQos0() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.receive.message/server", + "${app}/subscribe.receive.message/client"}) + public void shouldReceiveOneMessageAfterPublish() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.receive.messages.topic.alias.repeated/server", + "${app}/subscribe.receive.messages.topic.alias.repeated/client"}) + public void shouldReceiveMessagesTopicAliasRepeated() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.receive.message.overlapping.wildcard/server", + "${app}/subscribe.receive.message.overlapping.wildcard/client"}) + public void shouldReceiveMessageOverlappingWildcard() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.receive.message.wildcard/server", + "${app}/subscribe.receive.message.wildcard/client"}) + public void shouldReceiveOneMessageWithPatternTopic() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.topic.filter.multi.level.wildcard/server", + "${app}/subscribe.topic.filter.multi.level.wildcard/client"}) + public void shouldFilterMultiLevelWildcard() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.topic.filter.single.and.multi.level.wildcard/server", + "${app}/subscribe.topic.filter.single.and.multi.level.wildcard/client"}) + public void shouldFilterSingleAndMultiLevelWildcard() throws Exception + { + k3po.finish(); + } + + @Test + 
@Configuration("client.yaml") + @Specification({ + "${net}/subscribe.topic.filter.single.exact/server", + "${app}/subscribe.topic.filter.single.exact/client"}) + public void shouldFilterExact() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.topic.filter.two.single.level.wildcard/server", + "${app}/subscribe.topic.filter.two.single.level.wildcard/client"}) + public void shouldFilterTwoSingleLevelWildcard() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.topic.filters.aggregated.both.exact/server", + "${app}/subscribe.topic.filters.aggregated.both.exact/client"}) + public void shouldFilterAggregatedBothExact() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.topic.filters.aggregated.exact.and.wildcard/server", + "${app}/subscribe.topic.filters.aggregated.exact.and.wildcard/client"}) + public void shouldFilterAggregatedExactAndWildcard() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.topic.filter.single.level.wildcard/server", + "${app}/subscribe.topic.filter.single.level.wildcard/client"}) + public void shouldFilterSingleLevelWildcard() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.topic.filters.disjoint.wildcards/server", + "${app}/subscribe.topic.filters.disjoint.wildcards/client"}) + public void shouldFilterDisjointWildcard() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.topic.filters.isolated.both.exact/server", + "${app}/subscribe.topic.filters.isolated.both.exact/client"}) + public void shouldFilterIsolatedBothExact() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + 
@Specification({ + "${net}/subscribe.topic.filters.isolated.both.wildcard/server", + "${app}/subscribe.topic.filters.isolated.both.wildcard/client"}) + public void shouldFilterIsolatedBothWildcard() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.topic.filters.isolated.exact.and.wildcard/server", + "${app}/subscribe.topic.filters.isolated.exact.and.wildcard/client"}) + public void shouldFilterIsolatedExactAndWildcard() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.topic.filters.overlapping.wildcards/server", + "${app}/subscribe.topic.filters.overlapping.wildcards/client"}) + public void shouldFilterOverlappingWildcard() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.topic.filters.non.successful/server", + "${app}/subscribe.topic.filters.non.successful/client"}) + public void shouldFilterNonSuccessful() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/subscribe.reconnect.publish.no.subscription/server", + "${app}/subscribe.reconnect.publish.no.subscription/client"}) + public void shouldReceiveReconnectNoSubscription() throws Exception + { + k3po.finish(); + } +} diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java new file mode 100644 index 0000000000..54ddbe6c80 --- /dev/null +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java @@ -0,0 +1,95 @@ +/* + * Copyright 2021-2023 Aklivity Inc. 
+ * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.aklivity.zilla.runtime.binding.mqtt.internal.stream.client; + +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; +import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.rules.RuleChain.outerRule; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.DisableOnDebug; +import org.junit.rules.TestRule; +import org.junit.rules.Timeout; +import org.kaazing.k3po.junit.annotation.Specification; +import org.kaazing.k3po.junit.rules.K3poRule; + +import io.aklivity.zilla.runtime.engine.test.EngineRule; +import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; + +public class UnsubscribeIT +{ + private final K3poRule k3po = new K3poRule() + .addScriptRoot("net", "io/aklivity/zilla/specs/binding/mqtt/streams/network") + .addScriptRoot("app", "io/aklivity/zilla/specs/binding/mqtt/streams/application"); + + private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS)); + + private final EngineRule engine = new EngineRule() + .directory("target/zilla-itests") + .commandBufferCapacity(1024) + .responseBufferCapacity(1024) + .counterValuesBufferCapacity(8192) + .configure(PUBLISH_TIMEOUT, 1L) + .configure(ENGINE_DRAIN_ON_CLOSE, false) + 
.configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") + .external("net0") + .clean(); + + @Rule + public final TestRule chain = outerRule(engine).around(k3po).around(timeout); + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/unsubscribe.topic.filter.single/server", + "${app}/unsubscribe.topic.filter.single/client"}) + public void shouldAcknowledgeSingleTopicFilters() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/unsubscribe.after.subscribe/server", + "${app}/unsubscribe.after.subscribe/client"}) + public void shouldAcknowledge() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/unsubscribe.aggregated.topic.filters.both.exact/server", + "${app}/unsubscribe.aggregated.topic.filters.both.exact/client"}) + public void shouldAcknowledgeAggregatedTopicFiltersBothExact() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("client.yaml") + @Specification({ + "${net}/unsubscribe.topic.filters.non.successful/server", + "${app}/unsubscribe.topic.filters.non.successful/client"}) + public void shouldAcknowledgeNonSuccessful() throws Exception + { + k3po.finish(); + } +} diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/ConnectionIT.java similarity index 58% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java rename to incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/ConnectionIT.java index baf4cdd51c..d0636e005c 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/ConnectionIT.java +++ 
b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/ConnectionIT.java @@ -13,15 +13,12 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.binding.mqtt.internal.stream; +package io.aklivity.zilla.runtime.binding.mqtt.internal.stream.server; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.CLIENT_ID_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.CONNECT_TIMEOUT_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.KEEP_ALIVE_MINIMUM_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.MAXIMUM_QOS_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.RETAIN_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static io.aklivity.zilla.runtime.engine.test.EngineRule.ENGINE_BUFFER_SLOT_CAPACITY_NAME; import static java.util.concurrent.TimeUnit.SECONDS; @@ -65,10 +62,8 @@ public class ConnectionIT @Test @Configuration("server.yaml") @Specification({ - "${net}/connect.successful/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/connect.successful/client", + "${app}/session.connect/server"}) public void shouldConnect() throws Exception { k3po.finish(); @@ -78,10 +73,7 @@ public void shouldConnect() throws Exception 
@Configuration("server.credentials.username.yaml") @Specification({ "${net}/connect.username.authentication.successful/client", - "${app}/connect.authorize.publish.one.message/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${app}/session.connect/server"}) public void shouldAuthenticateUsernameAndConnect() throws Exception { k3po.finish(); @@ -91,9 +83,6 @@ public void shouldAuthenticateUsernameAndConnect() throws Exception @Configuration("server.credentials.username.yaml") @Specification({ "${net}/connect.username.authentication.failed/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldFailUsernameAuthentication() throws Exception { k3po.finish(); @@ -102,10 +91,8 @@ public void shouldFailUsernameAuthentication() throws Exception @Test @Configuration("server.credentials.password.yaml") @Specification({ - "${net}/connect.password.authentication.successful/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/connect.password.authentication.successful/client", + "${app}/session.connect/server"}) public void shouldAuthenticatePasswordAndConnect() throws Exception { k3po.finish(); @@ -115,9 +102,6 @@ public void shouldAuthenticatePasswordAndConnect() throws Exception @Configuration("server.credentials.password.yaml") @Specification({ "${net}/connect.password.authentication.failed/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldFailPasswordAuthentication() 
throws Exception { k3po.finish(); @@ -126,10 +110,9 @@ public void shouldFailPasswordAuthentication() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/connect.server.assigned.client.id/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/connect.server.assigned.client.id/client", + "${app}/session.connect/server"}) + @Configure(name = CLIENT_ID_NAME, value = "client") public void shouldConnectWithServerAssignedClientId() throws Exception { k3po.finish(); @@ -139,9 +122,6 @@ public void shouldConnectWithServerAssignedClientId() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.reject.missing.client.id/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectMissingClientId() throws Exception { k3po.finish(); @@ -150,23 +130,28 @@ public void shouldRejectMissingClientId() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/disconnect/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/disconnect/client", + "${app}/session.connect/server"}) public void shouldConnectThenDisconnect() throws Exception { k3po.finish(); } + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/disconnect.no.reasoncode.no.properties/client", + "${app}/session.connect/server"}) + public void shouldConnectThenDisconnectWithNoReasonCodeNoProperties() throws Exception + { + k3po.finish(); + } + @Test @Configuration("server.yaml") @Specification({ "${net}/disconnect.after.subscribe.and.publish/client", 
"${app}/disconnect.after.subscribe.and.publish/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldDisconnectAfterSubscribeAndPublish() throws Exception { k3po.finish(); @@ -177,9 +162,6 @@ public void shouldDisconnectAfterSubscribeAndPublish() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.invalid.protocol.version/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectInvalidProtocolVersion() throws Exception { k3po.finish(); @@ -189,9 +171,6 @@ public void shouldRejectInvalidProtocolVersion() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.invalid.flags/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectMalformedConnectPacket() throws Exception { k3po.finish(); @@ -201,9 +180,6 @@ public void shouldRejectMalformedConnectPacket() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.invalid.authentication.method/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectBadAuthenticationMethod() throws Exception { k3po.finish(); @@ -212,10 +188,8 @@ public void shouldRejectBadAuthenticationMethod() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/disconnect.reject.invalid.fixed.header.flags/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = 
SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/disconnect.reject.invalid.fixed.header.flags/client", + "${app}/session.connect/server"}) public void shouldRejectMalformedDisconnectPacket() throws Exception { k3po.finish(); @@ -225,10 +199,8 @@ public void shouldRejectMalformedDisconnectPacket() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/connect.reject.second.connect/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/connect.reject.second.connect/client", + "${app}/session.connect/server"}) public void shouldRejectSecondConnectPacket() throws Exception { k3po.finish(); @@ -237,10 +209,8 @@ public void shouldRejectSecondConnectPacket() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/connect.successful.fragmented/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/connect.successful.fragmented/client", + "${app}/session.connect/server"}) public void shouldConnectFragmented() throws Exception { k3po.finish(); @@ -251,9 +221,6 @@ public void shouldConnectFragmented() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.reject.other.packet.before.connect/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectOtherPacketBeforeConnect() throws Exception { k3po.finish(); @@ -263,9 +230,6 @@ public void shouldRejectOtherPacketBeforeConnect() throws Exception @Configuration("server.yaml") @Specification({ 
"${net}/connect.reject.topic.alias.maximum.repeated/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectConnectWhenTopicAliasMaximumRepeated() throws Exception { k3po.finish(); @@ -276,9 +240,6 @@ public void shouldRejectConnectWhenTopicAliasMaximumRepeated() throws Exception @Specification({ "${net}/client.sent.close/client", "${app}/client.sent.abort/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldReceiveClientSentClose() throws Exception { k3po.finish(); @@ -289,9 +250,6 @@ public void shouldReceiveClientSentClose() throws Exception @Specification({ "${net}/client.sent.abort/client", "${app}/client.sent.abort/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldReceiveClientSentAbort() throws Exception { k3po.finish(); @@ -302,9 +260,6 @@ public void shouldReceiveClientSentAbort() throws Exception @Specification({ "${net}/client.sent.reset/client", "${app}/client.sent.abort/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldReceiveClientSentReset() throws Exception { k3po.finish(); @@ -313,10 +268,8 @@ public void shouldReceiveClientSentReset() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/disconnect.after.keep.alive.timeout/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = 
MAXIMUM_QOS_NAME, value = "2") + "${net}/disconnect.after.keep.alive.timeout/client", + "${app}/session.connect/server"}) @Configure(name = KEEP_ALIVE_MINIMUM_NAME, value = "1") public void shouldDisconnectClientAfterKeepAliveTimeout() throws Exception { @@ -327,7 +280,6 @@ public void shouldDisconnectClientAfterKeepAliveTimeout() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.timeout.before.connect/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") @Configure(name = CONNECT_TIMEOUT_NAME, value = "1") public void shouldTimeoutBeforeConnect() throws Exception { @@ -337,9 +289,8 @@ public void shouldTimeoutBeforeConnect() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/connect.maximum.qos.0/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") + "${net}/connect.maximum.qos.0/client", + "${app}/connect.maximum.qos.0/server"}) public void shouldConnectWithMaximumQos0() throws Exception { k3po.finish(); @@ -348,11 +299,8 @@ public void shouldConnectWithMaximumQos0() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/connect.retain.not.supported/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = RETAIN_AVAILABLE_NAME, value = "false") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/connect.retain.not.supported/client", + "${app}/connect.retain.not.supported/server"}) public void shouldConnectWithRetainNotSupported() throws Exception { k3po.finish(); @@ -361,11 +309,8 @@ public void shouldConnectWithRetainNotSupported() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/connect.reject.will.retain.not.supported/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = 
SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = RETAIN_AVAILABLE_NAME, value = "false") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/connect.reject.will.retain.not.supported/client", + "${app}/connect.reject.will.retain.not.supported/server"}) public void shouldRejectConnectWillRetainNotSupported() throws Exception { k3po.finish(); @@ -375,9 +320,6 @@ public void shouldRejectConnectWillRetainNotSupported() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.reject.password.flag.no.password/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectConnectWithPasswordFlagSetNoPassword() throws Exception { k3po.finish(); @@ -388,9 +330,6 @@ public void shouldRejectConnectWithPasswordFlagSetNoPassword() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.reject.password.no.password.flag/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectConnectWithPasswordNoPasswordFlag() throws Exception { k3po.finish(); @@ -400,9 +339,6 @@ public void shouldRejectConnectWithPasswordNoPasswordFlag() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.reject.username.flag.only/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectConnectWithUsernameFlagNoUsername() throws Exception { k3po.finish(); @@ -413,9 +349,6 @@ public void shouldRejectConnectWithUsernameFlagNoUsername() throws Exception @Configuration("server.yaml") @Specification({ 
"${net}/connect.reject.username.flag.missing/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectConnectWithUsernameNoUsernameFlag() throws Exception { k3po.finish(); @@ -425,9 +358,6 @@ public void shouldRejectConnectWithUsernameNoUsernameFlag() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.reject.will.payload.missing/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectConnectWillPayloadMissing() throws Exception { k3po.finish(); @@ -437,9 +367,6 @@ public void shouldRejectConnectWillPayloadMissing() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.reject.will.properties.missing/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectConnectWillPropertiesMissing() throws Exception { k3po.finish(); @@ -449,9 +376,6 @@ public void shouldRejectConnectWillPropertiesMissing() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.reject.will.topic.missing/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectConnectWillTopicMissing() throws Exception { k3po.finish(); @@ -461,9 +385,6 @@ public void shouldRejectConnectWillTopicMissing() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.will.invalid.will.qos/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = 
SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectInvalidWillQos() throws Exception { k3po.finish(); @@ -473,8 +394,6 @@ public void shouldRejectInvalidWillQos() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.will.reject.will.qos.1.without.will.flag/client"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectWillQos1WithoutWillFlag() throws Exception { k3po.finish(); @@ -484,9 +403,6 @@ public void shouldRejectWillQos1WithoutWillFlag() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.will.reject.will.qos.2.without.will.flag/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectWillQos2WithoutWillFlag() throws Exception { k3po.finish(); @@ -496,9 +412,6 @@ public void shouldRejectWillQos2WithoutWillFlag() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.will.reject.will.retain.without.will.flag/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectWillRetainWithoutWillFlag() throws Exception { k3po.finish(); @@ -509,9 +422,6 @@ public void shouldRejectWillRetainWithoutWillFlag() throws Exception @Specification({ "${net}/connect.max.packet.size.exceeded/client", "${app}/connect.max.packet.size.exceeded/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldIgnorePublishPacketBiggerThanMaxPacketSize() throws Exception { 
k3po.finish(); @@ -520,10 +430,8 @@ public void shouldIgnorePublishPacketBiggerThanMaxPacketSize() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/connect.server.defined.keep.alive/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/connect.server.defined.keep.alive/client", + "${app}/session.connect/server"}) @Configure(name = KEEP_ALIVE_MINIMUM_NAME, value = "10") public void shouldConnectWithServerDefinedKeepAlive() throws Exception { @@ -538,10 +446,6 @@ public void shouldConnectWithServerDefinedKeepAlive() throws Exception @Specification({ "${net}/connect.subscribe.unfragmented/client", "${app}/subscribe.topic.filter.single.exact/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = KEEP_ALIVE_MINIMUM_NAME, value = "10") public void shouldConnectAndSubscribeUnfragmented() throws Exception { k3po.finish(); @@ -551,9 +455,6 @@ public void shouldConnectAndSubscribeUnfragmented() throws Exception @Configuration("server.yaml") @Specification({ "${net}/connect.reject.packet.too.large/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = ENGINE_BUFFER_SLOT_CAPACITY_NAME, value = "8192") public void shouldRejectPacketTooLarge() throws Exception { @@ -563,12 +464,30 @@ public void shouldRejectPacketTooLarge() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/disconnect.invalid.session.expiry/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - 
@Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/disconnect.invalid.session.expiry/client", + "${app}/session.connect/server"}) public void shouldRejectInvalidSessionExpiryOnDisconnect() throws Exception { k3po.finish(); } + + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/connect.non.successful.connack/client", + "${app}/connect.non.successful.connack/server"}) + public void shouldResetWithReasonCodeOnNonSuccessfulConnack() throws Exception + { + k3po.finish(); + } + + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/connect.non.successful.disconnect/client", + "${app}/connect.non.successful.disconnect/server"}) + public void shouldResetWithReasonCodeOnNonSuccessfulDisconnect() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PingIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PingIT.java similarity index 78% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PingIT.java rename to incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PingIT.java index f58ff08a3e..d2e3dc352d 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PingIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PingIT.java @@ -13,13 +13,10 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.mqtt.internal.stream; +package io.aklivity.zilla.runtime.binding.mqtt.internal.stream.server; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.KEEP_ALIVE_MINIMUM_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.MAXIMUM_QOS_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -61,10 +58,8 @@ public class PingIT @Test @Configuration("server.yaml") @Specification({ - "${net}/ping/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/ping/client", + "${app}/session.connect/server"}) public void shouldConnectThenPingRequestResponse() throws Exception { k3po.finish(); @@ -75,9 +70,6 @@ public void shouldConnectThenPingRequestResponse() throws Exception @Specification({ "${net}/ping.keep.alive/client", "${app}/subscribe.topic.filter.single.exact/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = KEEP_ALIVE_MINIMUM_NAME, value = "1") public void shouldPingAtKeepAliveInterval() throws Exception { diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PublishIT.java 
b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PublishIT.java similarity index 59% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PublishIT.java rename to incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PublishIT.java index 78a377f0d7..c669347c6c 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/PublishIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PublishIT.java @@ -13,14 +13,10 @@ * License for the specific language governing permissions and limitations * under the License. */ -package io.aklivity.zilla.runtime.binding.mqtt.internal.stream; +package io.aklivity.zilla.runtime.binding.mqtt.internal.stream.server; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.MAXIMUM_QOS_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.PUBLISH_TIMEOUT_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.RETAIN_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.TOPIC_ALIAS_MAXIMUM_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static io.aklivity.zilla.runtime.engine.test.EngineRule.ENGINE_BUFFER_SLOT_CAPACITY_NAME; @@ -67,9 +63,6 @@ public class PublishIT @Specification({ "${net}/publish.one.message/client", "${app}/publish.one.message/server"}) - @Configure(name = 
SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldPublishOneMessage() throws Exception { k3po.finish(); @@ -80,9 +73,6 @@ public void shouldPublishOneMessage() throws Exception @Specification({ "${net}/publish.retained/client", "${app}/publish.retained/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldPublishRetainedMessage() throws Exception { k3po.finish(); @@ -93,9 +83,6 @@ public void shouldPublishRetainedMessage() throws Exception @Specification({ "${net}/publish.message.with.topic.alias/client", "${app}/publish.message.with.topic.alias/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") public void shouldPublishMessageWithTopicAlias() throws Exception { @@ -107,9 +94,6 @@ public void shouldPublishMessageWithTopicAlias() throws Exception @Specification({ "${net}/publish.multiple.messages/client", "${app}/publish.multiple.messages/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldPublishMultipleMessages() throws Exception { k3po.finish(); @@ -120,9 +104,6 @@ public void shouldPublishMultipleMessages() throws Exception @Specification({ "${net}/publish.multiple.messages.unfragmented/client", "${app}/publish.multiple.messages/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") 
public void shouldPublishMultipleMessagesUnfragmented() throws Exception { k3po.finish(); @@ -133,9 +114,6 @@ public void shouldPublishMultipleMessagesUnfragmented() throws Exception @Specification({ "${net}/publish.one.message.subscribe.unfragmented/client", "${app}/publish.one.message.subscribe.unfragmented/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldPublishOneMessageSubscribeUnfragmented() throws Exception { k3po.finish(); @@ -146,9 +124,6 @@ public void shouldPublishOneMessageSubscribeUnfragmented() throws Exception @Specification({ "${net}/publish.multiple.messages.with.delay/client", "${app}/publish.multiple.messages/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = PUBLISH_TIMEOUT_NAME, value = "5") public void shouldPublishMultipleMessagesWithDelay() throws Exception { @@ -164,9 +139,6 @@ public void shouldPublishMultipleMessagesWithDelay() throws Exception @Specification({ "${net}/publish.messages.with.topic.alias.distinct/client", "${app}/publish.messages.with.topic.alias.distinct/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") public void shouldPublishMessagesWithTopicAliasDistinct() throws Exception { @@ -178,9 +150,6 @@ public void shouldPublishMessagesWithTopicAliasDistinct() throws Exception @Specification({ "${net}/publish.messages.with.topic.alias.repeated/client", "${app}/publish.messages.with.topic.alias.repeated/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = 
SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") public void shouldPublishMessagesWithTopicAliasRepeated() throws Exception { @@ -192,9 +161,6 @@ public void shouldPublishMessagesWithTopicAliasRepeated() throws Exception @Specification({ "${net}/publish.messages.with.topic.alias.replaced/client", "${app}/publish.messages.with.topic.alias.replaced/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "1") public void shouldPublishMessagesWithTopicAliasReplaced() throws Exception { @@ -206,9 +172,6 @@ public void shouldPublishMessagesWithTopicAliasReplaced() throws Exception @Specification({ "${net}/publish.messages.with.topic.alias.invalid.scope/client", "${app}/publish.messages.with.topic.alias.invalid.scope/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "1") public void shouldSendMessagesWithTopicAliasInvalidScope() throws Exception { @@ -218,10 +181,8 @@ public void shouldSendMessagesWithTopicAliasInvalidScope() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/publish.topic.not.routed/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/publish.topic.not.routed/client", + "${app}/session.connect/server"}) public void shouldRejectTopicNotRouted() throws Exception { k3po.finish(); @@ -230,10 +191,8 @@ public void shouldRejectTopicNotRouted() throws Exception @Test @Configuration("server.yaml") 
@Specification({ - "${net}/publish.reject.topic.alias.exceeds.maximum/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/publish.reject.topic.alias.exceeds.maximum/client", + "${app}/session.connect/server"}) public void shouldRejectPublishWhenTopicAliasExceedsMaximum() throws Exception { k3po.finish(); @@ -242,10 +201,8 @@ public void shouldRejectPublishWhenTopicAliasExceedsMaximum() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/publish.reject.topic.alias.repeated/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/publish.reject.topic.alias.repeated/client", + "${app}/session.connect/server"}) @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") public void shouldRejectPublishWithMultipleTopicAliases() throws Exception { @@ -255,10 +212,8 @@ public void shouldRejectPublishWithMultipleTopicAliases() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/publish.reject.client.sent.subscription.id/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/publish.reject.client.sent.subscription.id/client", + "${app}/session.connect/server"}) @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") public void shouldRejectPublishClientSentSubscriptionId() throws Exception { @@ -268,11 +223,8 @@ public void shouldRejectPublishClientSentSubscriptionId() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/publish.reject.invalid.payload.format/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = 
SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") + "${net}/publish.reject.invalid.payload.format/client", + "${app}/session.connect/server"}) public void shouldRejectPublishInvalidPayloadFormat() throws Exception { k3po.finish(); @@ -281,12 +233,9 @@ public void shouldRejectPublishInvalidPayloadFormat() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/publish.reject.qos1.not.supported/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "0") - @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - public void shouldRejectPublisQos1NotSupported() throws Exception + "${net}/publish.reject.qos1.not.supported/client", + "${app}/publish.reject.qos.not.supported/server"}) + public void shouldRejectPublishQos1NotSupported() throws Exception { k3po.finish(); } @@ -294,12 +243,9 @@ public void shouldRejectPublisQos1NotSupported() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/publish.reject.qos2.not.supported/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "0") - @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") - public void shouldRejectPublisQos2NotSupported() throws Exception + "${net}/publish.reject.qos2.not.supported/client", + "${app}/publish.reject.qos.not.supported/server"}) + public void shouldRejectPublishQos2NotSupported() throws Exception { k3po.finish(); } @@ -309,10 +255,6 @@ public void shouldRejectPublisQos2NotSupported() throws Exception @Configuration("server.yaml") @Specification({ "${net}/publish.reject.qos0.with.packet.id/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - 
@Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") public void shouldRejectPublishQos0WithPacketId() throws Exception { k3po.finish(); @@ -323,10 +265,6 @@ public void shouldRejectPublishQos0WithPacketId() throws Exception @Configuration("server.yaml") @Specification({ "${net}/publish.reject.qos1.without.packet.id/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") public void shouldRejectPublishQos1WithoutPacketId() throws Exception { k3po.finish(); @@ -337,10 +275,6 @@ public void shouldRejectPublishQos1WithoutPacketId() throws Exception @Configuration("server.yaml") @Specification({ "${net}/publish.reject.qos2.without.packet.id/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") public void shouldRejectPublishQos2WithoutPacketId() throws Exception { k3po.finish(); @@ -349,12 +283,8 @@ public void shouldRejectPublishQos2WithoutPacketId() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/publish.reject.retain.not.supported/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = RETAIN_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") - @Configure(name = TOPIC_ALIAS_MAXIMUM_NAME, value = "2") + "${net}/publish.reject.retain.not.supported/client", + "${app}/publish.reject.retain.not.supported/server"}) public void shouldRejectPublishRetainNotSupported() throws Exception { 
k3po.finish(); @@ -365,9 +295,6 @@ public void shouldRejectPublishRetainNotSupported() throws Exception @Specification({ "${net}/publish.with.user.property/client", "${app}/publish.with.user.property/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldPublishWithUserProperty() throws Exception { k3po.finish(); @@ -378,9 +305,6 @@ public void shouldPublishWithUserProperty() throws Exception @Specification({ "${net}/publish.with.user.properties.distinct/client", "${app}/publish.with.user.properties.distinct/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldPublishWithDistinctUserProperties() throws Exception { k3po.finish(); @@ -391,9 +315,6 @@ public void shouldPublishWithDistinctUserProperties() throws Exception @Specification({ "${net}/publish.with.user.properties.repeated/client", "${app}/publish.with.user.properties.repeated/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldPublishWithRepeatedUserProperties() throws Exception { k3po.finish(); @@ -404,9 +325,6 @@ public void shouldPublishWithRepeatedUserProperties() throws Exception @Specification({ "${net}/publish.empty.retained.message/client", "${app}/publish.empty.retained.message/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldPublishEmptyRetainedMessage() throws Exception { k3po.finish(); @@ -417,9 +335,6 @@ public void shouldPublishEmptyRetainedMessage() throws Exception 
@Specification({ "${net}/publish.empty.message/client", "${app}/publish.empty.message/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldPublishEmptyMessage() throws Exception { k3po.finish(); @@ -428,10 +343,8 @@ public void shouldPublishEmptyMessage() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/publish.reject.packet.too.large/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/publish.reject.packet.too.large/client", + "${app}/publish.reject.packet.too.large/server"}) @Configure(name = ENGINE_BUFFER_SLOT_CAPACITY_NAME, value = "8192") public void shouldRejectPacketTooLarge() throws Exception { diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java similarity index 75% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java rename to incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java index c941c7dc33..aaceb67114 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SessionIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java @@ -13,13 +13,11 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.mqtt.internal.stream; +package io.aklivity.zilla.runtime.binding.mqtt.internal.stream.server; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.KEEP_ALIVE_MINIMUM_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.MAXIMUM_QOS_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SERVER_REFERENCE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -64,8 +62,6 @@ public class SessionIT @Specification({ "${net}/session.connect.with.session.expiry/client", "${app}/session.connect.with.session.expiry/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldConnectWithSessionExpiry() throws Exception { k3po.finish(); @@ -76,8 +72,6 @@ public void shouldConnectWithSessionExpiry() throws Exception @Specification({ "${net}/session.connect.override.session.expiry/client", "${app}/session.connect.override.session.expiry/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldConnectServerOverridesSessionExpiry() throws Exception { k3po.finish(); @@ -88,8 +82,6 @@ public void shouldConnectServerOverridesSessionExpiry() throws Exception @Specification({ "${net}/session.subscribe/client", "${app}/session.subscribe/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void 
shouldSubscribeSaveSubscriptionsInSession() throws Exception { k3po.finish(); @@ -100,8 +92,6 @@ public void shouldSubscribeSaveSubscriptionsInSession() throws Exception @Specification({ "${net}/session.subscribe.multiple.isolated/client", "${app}/session.subscribe.multiple.isolated/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldSubscribeMultipleSaveSubscriptionsInSession() throws Exception { k3po.finish(); @@ -112,8 +102,6 @@ public void shouldSubscribeMultipleSaveSubscriptionsInSession() throws Exception @Specification({ "${net}/session.subscribe.via.session.state/client", "${app}/session.subscribe.via.session.state/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldSubscribeViaSessionState() throws Exception { k3po.finish(); @@ -124,8 +112,6 @@ public void shouldSubscribeViaSessionState() throws Exception @Specification({ "${net}/session.unsubscribe.after.subscribe/client", "${app}/session.unsubscribe.after.subscribe/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldUnsubscribeSaveNewSessionState() throws Exception { k3po.finish(); @@ -136,8 +122,6 @@ public void shouldUnsubscribeSaveNewSessionState() throws Exception @Specification({ "${net}/session.unsubscribe.after.subscribe.deferred/client", "${app}/session.unsubscribe.after.subscribe.deferred/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldUnsubscribeAfterSubscribeDeferred() throws Exception { k3po.finish(); @@ -148,8 +132,6 @@ public void shouldUnsubscribeAfterSubscribeDeferred() throws Exception @Specification({ "${net}/session.subscribe/client", "${app}/session.unsubscribe.via.session.state/server"}) - 
@Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldUnsubscribeViaSessionState() throws Exception { k3po.finish(); @@ -160,8 +142,6 @@ public void shouldUnsubscribeViaSessionState() throws Exception @Specification({ "${net}/session.will.message.retain/client", "${app}/session.will.message.retain/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldStoreWillMessageInSessionState() throws Exception { k3po.finish(); @@ -172,8 +152,6 @@ public void shouldStoreWillMessageInSessionState() throws Exception @Specification({ "${net}/session.connect.payload.fragmented/client", "${app}/session.will.message.retain/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldStoreWillMessageInSessionStatePayloadFragmented() throws Exception { k3po.finish(); @@ -185,8 +163,6 @@ public void shouldStoreWillMessageInSessionStatePayloadFragmented() throws Excep @Specification({ "${net}/session.will.message.normal.disconnect/client", "${app}/session.will.message.normal.disconnect/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldCloseSessionNormalDisconnect() throws Exception { k3po.finish(); @@ -197,8 +173,6 @@ public void shouldCloseSessionNormalDisconnect() throws Exception @Specification({ "${net}/session.will.message.disconnect.with.will.message/client", "${app}/session.will.message.abort/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldCloseSessionDisconnectWithWill() throws Exception { k3po.finish(); @@ -209,8 +183,6 @@ public void shouldCloseSessionDisconnectWithWill() throws Exception @Specification({ 
"${net}/session.will.message.no.ping.within.keep.alive/client", "${app}/session.will.message.abort/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = KEEP_ALIVE_MINIMUM_NAME, value = "1") public void shouldCloseSessionWithKeepAliveExpired() throws Exception { @@ -222,8 +194,6 @@ public void shouldCloseSessionWithKeepAliveExpired() throws Exception @Specification({ "${net}/session.exists.clean.start/client", "${app}/session.exists.clean.start/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldCloseExistingConnectionCleanStart() throws Exception { k3po.finish(); @@ -234,8 +204,6 @@ public void shouldCloseExistingConnectionCleanStart() throws Exception @Specification({ "${net}/session.abort.reconnect.non.clean.start/client", "${app}/session.abort.reconnect.non.clean.start/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldClientAbortAndReconnectWithNonCleanStart() throws Exception { k3po.finish(); @@ -246,8 +214,6 @@ public void shouldClientAbortAndReconnectWithNonCleanStart() throws Exception @Specification({ "${net}/session.client.takeover/client", "${app}/session.client.takeover/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldClientTakeOverSession() throws Exception { k3po.finish(); @@ -258,8 +224,6 @@ public void shouldClientTakeOverSession() throws Exception @Specification({ "${net}/session.server.redirect.after.connack/client", "${app}/session.server.redirect.after.connack/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = SERVER_REFERENCE_NAME, value = 
"mqtt-1.example.com:1883") public void shouldRedirectAfterConnack() throws Exception { @@ -271,8 +235,6 @@ public void shouldRedirectAfterConnack() throws Exception @Specification({ "${net}/session.server.redirect.before.connack/client", "${app}/session.server.redirect.before.connack/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") @Configure(name = SERVER_REFERENCE_NAME, value = "mqtt-1.example.com:1883") public void shouldRedirectBeforeConnack() throws Exception { @@ -284,8 +246,6 @@ public void shouldRedirectBeforeConnack() throws Exception @Specification({ "${net}/session.subscribe.publish.routing/client", "${app}/session.subscribe.publish.routing/server"}) - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldSubscribeAndPublishToNonDefaultRoute() throws Exception { k3po.finish(); diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SubscribeIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java similarity index 61% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SubscribeIT.java rename to incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java index 1b27d63092..5f2b0a061a 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/SubscribeIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java @@ -13,15 +13,10 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.mqtt.internal.stream; +package io.aklivity.zilla.runtime.binding.mqtt.internal.stream.server; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.MAXIMUM_QOS_NAME; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.NO_LOCAL_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SUBSCRIPTION_IDENTIFIERS_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.WILDCARD_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -65,9 +60,6 @@ public class SubscribeIT @Specification({ "${net}/subscribe.one.message/client", "${app}/subscribe.one.message/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldReceiveOneMessage() throws Exception { k3po.finish(); @@ -78,9 +70,6 @@ public void shouldReceiveOneMessage() throws Exception @Specification({ "${net}/subscribe.one.message.receive.response.topic.and.correlation.data/client", "${app}/subscribe.one.message.receive.response.topic.and.correlation.data/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldReceiveCorrelationData() throws 
Exception { k3po.finish(); @@ -91,9 +80,6 @@ public void shouldReceiveCorrelationData() throws Exception @Specification({ "${net}/subscribe.one.message.user.properties.unaltered/client", "${app}/subscribe.one.message.user.properties.unaltered/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldReceiveOneMessageWithUserPropertiesUnaltered() throws Exception { k3po.finish(); @@ -102,10 +88,8 @@ public void shouldReceiveOneMessageWithUserPropertiesUnaltered() throws Exceptio @Test @Configuration("server.yaml") @Specification({ - "${net}/subscribe.one.message.with.invalid.subscription.id/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/subscribe.one.message.with.invalid.subscription.id/client", + "${app}/session.connect/server"}) public void shouldReceiveOneMessageWithInvalidSubscriptionId() throws Exception { k3po.finish(); @@ -116,9 +100,6 @@ public void shouldReceiveOneMessageWithInvalidSubscriptionId() throws Exception @Specification({ "${net}/subscribe.topic.filter.single.exact/client", "${app}/subscribe.topic.filter.single.exact/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldFilterExact() throws Exception { k3po.finish(); @@ -129,9 +110,6 @@ public void shouldFilterExact() throws Exception @Specification({ "${net}/subscribe.topic.filter.multi.level.wildcard/client", "${app}/subscribe.topic.filter.multi.level.wildcard/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = 
MAXIMUM_QOS_NAME, value = "2") public void shouldFilterMultiLevelWildcard() throws Exception { k3po.finish(); @@ -142,9 +120,6 @@ public void shouldFilterMultiLevelWildcard() throws Exception @Specification({ "${net}/subscribe.topic.filter.single.and.multi.level.wildcard/client", "${app}/subscribe.topic.filter.single.and.multi.level.wildcard/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldFilterSingleAndMultiLevelWildcard() throws Exception { k3po.finish(); @@ -155,9 +130,6 @@ public void shouldFilterSingleAndMultiLevelWildcard() throws Exception @Specification({ "${net}/subscribe.topic.filter.single.level.wildcard/client", "${app}/subscribe.topic.filter.single.level.wildcard/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldFilterSingleLevelWildcard() throws Exception { k3po.finish(); @@ -168,9 +140,6 @@ public void shouldFilterSingleLevelWildcard() throws Exception @Specification({ "${net}/subscribe.topic.filter.two.single.level.wildcard/client", "${app}/subscribe.topic.filter.two.single.level.wildcard/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldFilterTwoSingleLevelWildcard() throws Exception { k3po.finish(); @@ -181,9 +150,6 @@ public void shouldFilterTwoSingleLevelWildcard() throws Exception @Specification({ "${net}/subscribe.topic.filters.aggregated.both.exact/client", "${app}/subscribe.topic.filters.aggregated.both.exact/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - 
@Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldFilterAggregatedBothExact() throws Exception { k3po.finish(); @@ -194,9 +160,6 @@ public void shouldFilterAggregatedBothExact() throws Exception @Specification({ "${net}/subscribe.topic.filters.isolated.both.exact/client", "${app}/subscribe.topic.filters.isolated.both.exact/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldFilterIsolatedBothExact() throws Exception { k3po.finish(); @@ -207,9 +170,6 @@ public void shouldFilterIsolatedBothExact() throws Exception @Specification({ "${net}/subscribe.topic.filters.isolated.both.wildcard/client", "${app}/subscribe.topic.filters.isolated.both.wildcard/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldFilterIsolatedBothWildcard() throws Exception { k3po.finish(); @@ -220,9 +180,6 @@ public void shouldFilterIsolatedBothWildcard() throws Exception @Specification({ "${net}/subscribe.topic.filters.aggregated.exact.and.wildcard/client", "${app}/subscribe.topic.filters.aggregated.exact.and.wildcard/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldFilterAggregatedExactAndWildcard() throws Exception { k3po.finish(); @@ -233,9 +190,6 @@ public void shouldFilterAggregatedExactAndWildcard() throws Exception @Specification({ "${net}/subscribe.topic.filters.disjoint.wildcards/client", "${app}/subscribe.topic.filters.disjoint.wildcards/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name 
= MAXIMUM_QOS_NAME, value = "2") public void shouldFilterDisjointWildcard() throws Exception { k3po.finish(); @@ -246,9 +200,6 @@ public void shouldFilterDisjointWildcard() throws Exception @Specification({ "${net}/subscribe.topic.filters.isolated.exact.and.wildcard/client", "${app}/subscribe.topic.filters.isolated.exact.and.wildcard/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldFilterIsolatedExactAndWildcard() throws Exception { k3po.finish(); @@ -259,9 +210,6 @@ public void shouldFilterIsolatedExactAndWildcard() throws Exception @Specification({ "${net}/subscribe.topic.filters.overlapping.wildcards/client", "${app}/subscribe.topic.filters.overlapping.wildcards/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldFilterOverlappingWildcard() throws Exception { k3po.finish(); @@ -272,9 +220,6 @@ public void shouldFilterOverlappingWildcard() throws Exception @Specification({ "${net}/subscribe.get.retained.as.published/client", "${app}/subscribe.get.retained.as.published/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldReceiveRetainedAsPublished() throws Exception { k3po.finish(); @@ -285,9 +230,6 @@ public void shouldReceiveRetainedAsPublished() throws Exception @Specification({ "${net}/subscribe.qos0.publish.retained.no.replay/client", "${app}/subscribe.qos0.publish.retained.no.replay/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void 
shouldNotReplayRetained() throws Exception { k3po.finish(); @@ -298,9 +240,6 @@ public void shouldNotReplayRetained() throws Exception @Specification({ "${net}/subscribe.qos0.replay.retained.no.packet.id/client", "${app}/subscribe.qos0.replay.retained.no.packet.id/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldReceiveAndReplayRetainedQos0() throws Exception { k3po.finish(); @@ -309,10 +248,8 @@ public void shouldReceiveAndReplayRetainedQos0() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/subscribe.reject.no.local/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/subscribe.reject.no.local/client", + "${app}/session.connect/server"}) @Configure(name = NO_LOCAL_NAME, value = "false") public void shouldRejectNoLocal() throws Exception { @@ -324,9 +261,6 @@ public void shouldRejectNoLocal() throws Exception @Specification({ "${net}/subscribe.receive.message/client", "${app}/subscribe.receive.message/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldReceiveOneMessageAfterPublish() throws Exception { k3po.finish(); @@ -337,9 +271,6 @@ public void shouldReceiveOneMessageAfterPublish() throws Exception @Specification({ "${net}/subscribe.receive.message.overlapping.wildcard/client", "${app}/subscribe.receive.message.overlapping.wildcard/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void 
shouldReceiveMessageOverlappingWildcard() throws Exception { k3po.finish(); @@ -350,9 +281,6 @@ public void shouldReceiveMessageOverlappingWildcard() throws Exception @Specification({ "${net}/subscribe.receive.message.wildcard/client", "${app}/subscribe.receive.message.wildcard/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldReceiveOneMessageWithPatternTopic() throws Exception { k3po.finish(); @@ -363,9 +291,6 @@ public void shouldReceiveOneMessageWithPatternTopic() throws Exception @Specification({ "${net}/subscribe.retain.as.published/client", "${app}/subscribe.retain.as.published/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldReceiveRetainAsPublished() throws Exception { k3po.finish(); @@ -376,9 +301,6 @@ public void shouldReceiveRetainAsPublished() throws Exception @Specification({ "${net}/subscribe.publish.no.local/client", "${app}/subscribe.publish.no.local/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldNotReceivePublishLocal() throws Exception { k3po.finish(); @@ -387,10 +309,8 @@ public void shouldNotReceivePublishLocal() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/subscribe.invalid.fixed.header.flags/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/subscribe.invalid.fixed.header.flags/client", + "${app}/session.connect/server"}) public void shouldRejectMalformedPacket() throws 
Exception { k3po.finish(); @@ -399,10 +319,8 @@ public void shouldRejectMalformedPacket() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/subscribe.invalid.topic.filter/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/subscribe.invalid.topic.filter/client", + "${app}/session.connect/server"}) public void shouldRejectInvalidTopicFilter() throws Exception { k3po.finish(); @@ -411,11 +329,8 @@ public void shouldRejectInvalidTopicFilter() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/subscribe.reject.wildcard.subscriptions.not.supported/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = WILDCARD_SUBSCRIPTION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/subscribe.reject.wildcard.subscriptions.not.supported/client", + "${app}/subscribe.reject.wildcard.subscriptions.not.supported/server"}) public void shouldRejectWildcardSubscriptionsNotSupported() throws Exception { k3po.finish(); @@ -424,11 +339,8 @@ public void shouldRejectWildcardSubscriptionsNotSupported() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/subscribe.reject.subscription.ids.not.supported/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SUBSCRIPTION_IDENTIFIERS_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/subscribe.reject.subscription.ids.not.supported/client", + "${app}/subscribe.reject.subscription.ids.not.supported/server"}) public void shouldRejectSubscriptionIdentifiersNotSupported() throws Exception { k3po.finish(); @@ -437,12 
+349,20 @@ public void shouldRejectSubscriptionIdentifiersNotSupported() throws Exception @Test @Configuration("server.yaml") @Specification({ - "${net}/subscribe.reject.shared.subscriptions.not.supported/client"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "false") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") + "${net}/subscribe.reject.shared.subscriptions.not.supported/client", + "${app}/subscribe.reject.shared.subscriptions.not.supported/server"}) public void shouldRejectSharedSubscriptionsNotSupported() throws Exception { k3po.finish(); } + + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/subscribe.topic.filters.non.successful/client", + "${app}/subscribe.topic.filters.non.successful/server"}) + public void shouldFilterNonSuccessful() throws Exception + { + k3po.finish(); + } } diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/UnsubscribeIT.java b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/UnsubscribeIT.java similarity index 69% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/UnsubscribeIT.java rename to incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/UnsubscribeIT.java index 4eb2ffbb0b..14752d0c83 100644 --- a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/UnsubscribeIT.java +++ b/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/UnsubscribeIT.java @@ -13,12 +13,9 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ -package io.aklivity.zilla.runtime.binding.mqtt.internal.stream; +package io.aklivity.zilla.runtime.binding.mqtt.internal.stream.server; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.MAXIMUM_QOS_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SESSION_AVAILABLE_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SHARED_SUBSCRIPTION_AVAILABLE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -33,7 +30,6 @@ import io.aklivity.zilla.runtime.engine.test.EngineRule; import io.aklivity.zilla.runtime.engine.test.annotation.Configuration; -import io.aklivity.zilla.runtime.engine.test.annotation.Configure; public class UnsubscribeIT { @@ -41,7 +37,7 @@ public class UnsubscribeIT .addScriptRoot("net", "io/aklivity/zilla/specs/binding/mqtt/streams/network") .addScriptRoot("app", "io/aklivity/zilla/specs/binding/mqtt/streams/application"); - private final TestRule timeout = new DisableOnDebug(new Timeout(20, SECONDS)); + private final TestRule timeout = new DisableOnDebug(new Timeout(10, SECONDS)); private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") @@ -62,9 +58,6 @@ public class UnsubscribeIT @Specification({ "${net}/unsubscribe.after.subscribe/client", "${app}/unsubscribe.after.subscribe/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldAcknowledge() throws Exception { k3po.finish(); @@ -75,9 +68,6 @@ public void shouldAcknowledge() throws Exception @Specification({ "${net}/unsubscribe.topic.filter.single/client", 
"${app}/unsubscribe.topic.filter.single/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldAcknowledgeSingleTopicFilters() throws Exception { k3po.finish(); @@ -88,9 +78,6 @@ public void shouldAcknowledgeSingleTopicFilters() throws Exception @Specification({ "${net}/unsubscribe.publish.unfragmented/client", "${app}/unsubscribe.publish.unfragmented/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldAcknowledgeAndPublishUnfragmented() throws Exception { k3po.finish(); @@ -101,9 +88,6 @@ public void shouldAcknowledgeAndPublishUnfragmented() throws Exception @Specification({ "${net}/unsubscribe.aggregated.topic.filters.both.exact/client", "${app}/unsubscribe.aggregated.topic.filters.both.exact/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldAcknowledgeAggregatedTopicFiltersBothExact() throws Exception { k3po.finish(); @@ -114,9 +98,6 @@ public void shouldAcknowledgeAggregatedTopicFiltersBothExact() throws Exception @Specification({ "${net}/unsubscribe.no.matching.subscription/client", "${app}/subscribe.topic.filter.single.exact/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldAcknowledgeNoMatchingSubscription() throws Exception { k3po.finish(); @@ -127,9 +108,6 @@ public void shouldAcknowledgeNoMatchingSubscription() throws Exception @Specification({ "${net}/unsubscribe.reject.invalid.fixed.header.flags/client", 
"${app}/subscribe.topic.filter.single.exact/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectMalformedPacket() throws Exception { k3po.finish(); @@ -140,9 +118,6 @@ public void shouldRejectMalformedPacket() throws Exception @Specification({ "${net}/unsubscribe.reject.missing.packet.id/client", "${app}/subscribe.topic.filter.single.exact/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectWithoutPacketId() throws Exception { k3po.finish(); @@ -153,11 +128,18 @@ public void shouldRejectWithoutPacketId() throws Exception @Specification({ "${net}/unsubscribe.reject.no.topic.filter/client", "${app}/subscribe.topic.filter.single.exact/server"}) - @Configure(name = SESSION_AVAILABLE_NAME, value = "false") - @Configure(name = SHARED_SUBSCRIPTION_AVAILABLE_NAME, value = "true") - @Configure(name = MAXIMUM_QOS_NAME, value = "2") public void shouldRejectNoTopicFilter() throws Exception { k3po.finish(); } + + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/unsubscribe.topic.filters.non.successful/client", + "${app}/unsubscribe.topic.filters.non.successful/server"}) + public void shouldAcknowledgeNonSuccessful() throws Exception + { + k3po.finish(); + } } From dc7da86f367b43ee091792eea7587923b929286a Mon Sep 17 00:00:00 2001 From: John Fallows Date: Wed, 20 Sep 2023 07:23:35 -0700 Subject: [PATCH 096/115] Promote mqtt and mqtt-kafka out of incubator --- cloud/docker-image/pom.xml | 24 +++++++++---------- .../src/main/docker/release/zpm.json.template | 2 ++ incubator/pom.xml | 14 ----------- .../binding-mqtt-kafka}/COPYRIGHT | 0 .../binding-mqtt-kafka}/LICENSE | 0 .../binding-mqtt-kafka/NOTICE | 0 
.../binding-mqtt-kafka}/NOTICE.template | 0 .../binding-mqtt-kafka}/mvnw | 0 .../binding-mqtt-kafka}/mvnw.cmd | 0 .../binding-mqtt-kafka/pom.xml | 4 ++-- .../config/MqttKafkaConditionConfig.java | 0 .../mqtt/kafka/internal/InstanceId.java | 0 .../mqtt/kafka/internal/MqttKafkaBinding.java | 0 .../internal/MqttKafkaBindingContext.java | 0 .../internal/MqttKafkaBindingFactorySpi.java | 0 .../internal/MqttKafkaConfiguration.java | 0 .../config/MqttKafkaBindingConfig.java | 0 .../MqttKafkaConditionConfigAdapter.java | 0 .../config/MqttKafkaHeaderHelper.java | 0 .../config/MqttKafkaOptionsConfig.java | 0 .../config/MqttKafkaOptionsConfigAdapter.java | 0 .../internal/config/MqttKafkaRouteConfig.java | 0 .../config/MqttKafkaTopicsConfig.java | 0 .../stream/MqttKafkaProxyFactory.java | 0 .../stream/MqttKafkaPublishFactory.java | 0 .../stream/MqttKafkaSessionFactory.java | 0 .../kafka/internal/stream/MqttKafkaState.java | 0 .../stream/MqttKafkaStreamFactory.java | 0 .../stream/MqttKafkaSubscribeFactory.java | 0 .../src/main/moditect/module-info.java | 0 ...a.runtime.engine.binding.BindingFactorySpi | 0 ...me.engine.config.ConditionConfigAdapterSpi | 0 ...time.engine.config.OptionsConfigAdapterSpi | 0 .../src/main/zilla/internal.idl | 0 .../internal/MqttKafkaConfigurationTest.java | 0 .../MqttKafkaConditionConfigAdapterTest.java | 0 .../MqttKafkaOptionsConfigAdapterTest.java | 0 .../stream/MqttKafkaPublishProxyIT.java | 0 .../stream/MqttKafkaSessionProxyIT.java | 0 .../stream/MqttKafkaSubscribeProxyIT.java | 0 .../binding-mqtt}/COPYRIGHT | 0 .../binding-mqtt}/LICENSE | 0 {incubator => runtime}/binding-mqtt/NOTICE | 0 .../binding-mqtt}/NOTICE.template | 0 .../binding-mqtt}/mvnw | 0 .../binding-mqtt}/mvnw.cmd | 0 {incubator => runtime}/binding-mqtt/pom.xml | 4 ++-- .../mqtt/config/MqttConditionConfig.java | 0 .../config/MqttConditionConfigBuilder.java | 0 .../mqtt/config/MqttPublishConfig.java | 0 .../mqtt/config/MqttSessionConfig.java | 0 
.../mqtt/config/MqttSubscribeConfig.java | 0 .../binding/mqtt/internal/MqttBinding.java | 0 .../mqtt/internal/MqttBindingContext.java | 0 .../mqtt/internal/MqttBindingFactorySpi.java | 0 .../mqtt/internal/MqttConfiguration.java | 0 .../mqtt/internal/MqttReasonCodes.java | 0 .../binding/mqtt/internal/MqttValidator.java | 0 .../config/MqttAuthorizationConfig.java | 0 .../internal/config/MqttBindingConfig.java | 0 .../config/MqttConditionConfigAdapter.java | 0 .../internal/config/MqttConditionMatcher.java | 0 .../internal/config/MqttOptionsConfig.java | 0 .../config/MqttOptionsConfigAdapter.java | 0 .../mqtt/internal/config/MqttRouteConfig.java | 0 .../internal/stream/MqttClientFactory.java | 0 .../internal/stream/MqttServerFactory.java | 0 .../mqtt/internal/stream/MqttState.java | 0 .../internal/stream/MqttStreamFactory.java | 0 .../src/main/moditect/module-info.java | 0 ...a.runtime.engine.binding.BindingFactorySpi | 0 ...me.engine.config.ConditionConfigAdapterSpi | 0 ...time.engine.config.OptionsConfigAdapterSpi | 0 .../binding-mqtt/src/main/zilla/protocol.idl | 0 .../mqtt/internal/MqttConfigurationTest.java | 0 .../binding/mqtt/internal/ValidatorTest.java | 0 .../MqttConditionConfigAdapterTest.java | 0 .../config/MqttOptionsConfigAdapterTest.java | 0 .../internal/stream/client/ConnectionIT.java | 0 .../mqtt/internal/stream/client/PingIT.java | 0 .../internal/stream/client/PublishIT.java | 0 .../internal/stream/client/SubscribeIT.java | 0 .../internal/stream/client/UnsubscribeIT.java | 0 .../internal/stream/server/ConnectionIT.java | 0 .../mqtt/internal/stream/server/PingIT.java | 0 .../internal/stream/server/PublishIT.java | 0 .../internal/stream/server/SessionIT.java | 0 .../internal/stream/server/SubscribeIT.java | 0 .../internal/stream/server/UnsubscribeIT.java | 0 runtime/pom.xml | 12 ++++++++++ .../binding-mqtt-kafka.spec}/COPYRIGHT | 0 .../binding-mqtt-kafka.spec}/LICENSE | 0 .../binding-mqtt-kafka.spec/NOTICE | 2 +- 
.../binding-mqtt-kafka.spec}/NOTICE.template | 0 .../binding-mqtt-kafka.spec}/mvnw | 0 .../binding-mqtt-kafka.spec}/mvnw.cmd | 0 .../binding-mqtt-kafka.spec/pom.xml | 4 ++-- .../src/main/moditect/module-info.java | 0 .../mqtt/kafka/config/proxy.options.yaml | 0 ...xy.when.capabilities.with.kafka.topic.yaml | 0 .../binding/mqtt/kafka/config/proxy.yaml | 0 .../kafka/schema/mqtt.kafka.schema.patch.json | 0 .../publish.client.sent.abort/client.rpt | 0 .../publish.client.sent.abort/server.rpt | 0 .../publish.client.sent.reset/client.rpt | 0 .../publish.client.sent.reset/server.rpt | 0 .../kafka/publish.empty.message/client.rpt | 0 .../kafka/publish.empty.message/server.rpt | 0 .../kafka/publish.multiple.clients/client.rpt | 0 .../kafka/publish.multiple.clients/server.rpt | 0 .../publish.multiple.messages/client.rpt | 0 .../publish.multiple.messages/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../kafka/publish.one.message/client.rpt | 0 .../kafka/publish.one.message/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../streams/kafka/publish.retained/client.rpt | 0 .../streams/kafka/publish.retained/server.rpt | 0 .../publish.server.sent.abort/client.rpt | 0 .../publish.server.sent.abort/server.rpt | 0 .../kafka/publish.server.sent.data/client.rpt | 0 .../kafka/publish.server.sent.data/server.rpt | 0 .../publish.server.sent.flush/client.rpt | 0 .../publish.server.sent.flush/server.rpt | 0 .../publish.server.sent.reset/client.rpt | 0 .../publish.server.sent.reset/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../publish.with.user.property/client.rpt | 0 .../publish.with.user.property/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../session.cancel.session.expiry/client.rpt | 0 .../session.cancel.session.expiry/server.rpt | 0 
.../session.client.sent.reset/client.rpt | 0 .../session.client.sent.reset/server.rpt | 0 .../kafka/session.client.takeover/client.rpt | 0 .../kafka/session.client.takeover/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../session.exists.clean.start/client.rpt | 0 .../session.exists.clean.start/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../streams/kafka/session.redirect/client.rpt | 0 .../streams/kafka/session.redirect/server.rpt | 0 .../session.server.sent.reset/client.rpt | 0 .../session.server.sent.reset/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../kafka/session.subscribe/client.rpt | 0 .../kafka/session.subscribe/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.client.sent.abort/client.rpt | 0 .../subscribe.client.sent.abort/server.rpt | 0 .../subscribe.client.sent.data/client.rpt | 0 .../subscribe.client.sent.data/server.rpt | 0 .../subscribe.client.sent.reset/client.rpt | 0 .../subscribe.client.sent.reset/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.filter.change.retain/client.rpt | 0 .../subscribe.filter.change.retain/server.rpt | 0 .../subscribe.multiple.message/client.rpt | 0 .../subscribe.multiple.message/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../kafka/subscribe.one.message/client.rpt | 0 
.../kafka/subscribe.one.message/server.rpt | 0 .../subscribe.publish.no.local/client.rpt | 0 .../subscribe.publish.no.local/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../streams/kafka/subscribe.retain/client.rpt | 0 .../streams/kafka/subscribe.retain/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.server.sent.abort/client.rpt | 0 .../subscribe.server.sent.abort/server.rpt | 0 .../subscribe.server.sent.flush/client.rpt | 0 .../subscribe.server.sent.flush/server.rpt | 0 .../subscribe.server.sent.reset/client.rpt | 0 .../subscribe.server.sent.reset/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../unsubscribe.after.subscribe/client.rpt | 0 .../unsubscribe.after.subscribe/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../mqtt/publish.client.sent.abort/client.rpt | 0 .../mqtt/publish.client.sent.abort/server.rpt | 0 .../mqtt/publish.client.sent.reset/client.rpt | 0 .../mqtt/publish.client.sent.reset/server.rpt | 0 .../mqtt/publish.empty.message/client.rpt | 0 .../mqtt/publish.empty.message/server.rpt | 0 .../mqtt/publish.multiple.clients/client.rpt | 0 .../mqtt/publish.multiple.clients/server.rpt | 0 .../mqtt/publish.multiple.messages/client.rpt | 0 .../mqtt/publish.multiple.messages/server.rpt | 0 .../mqtt/publish.one.message/client.rpt | 0 .../mqtt/publish.one.message/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../streams/mqtt/publish.retained/client.rpt | 0 .../streams/mqtt/publish.retained/server.rpt | 0 
.../mqtt/publish.server.sent.abort/client.rpt | 0 .../mqtt/publish.server.sent.abort/server.rpt | 0 .../mqtt/publish.server.sent.data/client.rpt | 0 .../mqtt/publish.server.sent.data/server.rpt | 0 .../mqtt/publish.server.sent.flush/client.rpt | 0 .../mqtt/publish.server.sent.flush/server.rpt | 0 .../mqtt/publish.server.sent.reset/client.rpt | 0 .../mqtt/publish.server.sent.reset/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../publish.with.user.property/client.rpt | 0 .../publish.with.user.property/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../mqtt/session.client.sent.reset/client.rpt | 0 .../mqtt/session.client.sent.reset/server.rpt | 0 .../mqtt/session.client.takeover/client.rpt | 0 .../mqtt/session.client.takeover/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../session.exists.clean.start/client.rpt | 0 .../session.exists.clean.start/server.rpt | 0 .../streams/mqtt/session.redirect/client.rpt | 0 .../streams/mqtt/session.redirect/server.rpt | 0 .../mqtt/session.server.sent.reset/client.rpt | 0 .../mqtt/session.server.sent.reset/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../streams/mqtt/session.subscribe/client.rpt | 0 .../streams/mqtt/session.subscribe/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../mqtt/session.will.message/client.rpt | 0 .../mqtt/session.will.message/server.rpt | 0 .../subscribe.client.sent.abort/client.rpt | 0 .../subscribe.client.sent.abort/server.rpt | 0 .../subscribe.client.sent.data/client.rpt | 0 .../subscribe.client.sent.data/server.rpt | 0 
.../subscribe.client.sent.reset/client.rpt | 0 .../subscribe.client.sent.reset/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.filter.change.retain/client.rpt | 0 .../subscribe.filter.change.retain/server.rpt | 0 .../subscribe.multiple.message/client.rpt | 0 .../subscribe.multiple.message/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../mqtt/subscribe.one.message/client.rpt | 0 .../mqtt/subscribe.one.message/server.rpt | 0 .../subscribe.publish.no.local/client.rpt | 0 .../subscribe.publish.no.local/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.retain.as.published/client.rpt | 0 .../subscribe.retain.as.published/server.rpt | 0 .../streams/mqtt/subscribe.retain/client.rpt | 0 .../streams/mqtt/subscribe.retain/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.server.sent.abort/client.rpt | 0 .../subscribe.server.sent.abort/server.rpt | 0 .../subscribe.server.sent.flush/client.rpt | 0 .../subscribe.server.sent.flush/server.rpt | 0 .../subscribe.server.sent.reset/client.rpt | 0 .../subscribe.server.sent.reset/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../unsubscribe.after.subscribe/client.rpt | 0 .../unsubscribe.after.subscribe/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../binding/mqtt/kafka/config/SchemaTest.java | 0 .../binding/mqtt/kafka/streams/KafkaIT.java | 0 .../binding/mqtt/kafka/streams/MqttIT.java | 0 .../binding-mqtt.spec}/COPYRIGHT | 0 .../binding-mqtt.spec}/LICENSE | 0 {incubator => 
specs}/binding-mqtt.spec/NOTICE | 0 .../binding-mqtt.spec}/NOTICE.template | 0 .../binding-mqtt.spec}/mvnw | 0 .../binding-mqtt.spec}/mvnw.cmd | 0 .../binding-mqtt.spec/pom.xml | 4 ++-- .../binding/mqtt/internal/MqttFunctions.java | 0 .../src/main/moditect/module-info.java | 0 ...kaazing.k3po.lang.el.spi.FunctionMapperSpi | 0 .../main/resources/META-INF/zilla/mqtt.idl | 0 .../config/client.when.topic.or.sessions.yaml | 0 .../mqtt/config/client.when.topic.yaml | 0 .../specs/binding/mqtt/config/client.yaml | 0 .../config/server.credentials.password.yaml | 0 .../config/server.credentials.username.yaml | 0 .../mqtt/config/server.route.non.default.yaml | 0 .../specs/binding/mqtt/config/server.yaml | 0 .../mqtt/schema/mqtt.schema.patch.json | 0 .../application/client.sent.abort/client.rpt | 0 .../application/client.sent.abort/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../connect.maximum.qos.0/client.rpt | 0 .../connect.maximum.qos.0/server.rpt | 0 .../connect.non.successful.connack/client.rpt | 0 .../connect.non.successful.connack/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../connect.retain.not.supported/client.rpt | 0 .../connect.retain.not.supported/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../publish.empty.message/client.rpt | 0 .../publish.empty.message/server.rpt | 0 .../publish.empty.retained.message/client.rpt | 0 .../publish.empty.retained.message/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../publish.multiple.messages/client.rpt | 0 .../publish.multiple.messages/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../publish.one.message/client.rpt | 0 .../publish.one.message/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 
.../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../application/publish.retained/client.rpt | 0 .../application/publish.retained/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../publish.with.user.property/client.rpt | 0 .../publish.with.user.property/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../session.client.takeover/client.rpt | 0 .../session.client.takeover/server.rpt | 0 .../session.connect.abort/client.rpt | 0 .../session.connect.abort/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../application/session.connect/client.rpt | 0 .../application/session.connect/server.rpt | 0 .../session.exists.clean.start/client.rpt | 0 .../session.exists.clean.start/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../session.server.sent.abort/client.rpt | 0 .../session.server.sent.abort/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../application/session.subscribe/client.rpt | 0 .../application/session.subscribe/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../session.will.message.abort/client.rpt | 0 .../session.will.message.abort/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../session.will.message.retain/client.rpt | 0 .../session.will.message.retain/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.one.message/client.rpt | 0 .../subscribe.one.message/server.rpt | 0 .../subscribe.publish.no.local/client.rpt | 0 .../subscribe.publish.no.local/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.receive.message/client.rpt | 0 
.../subscribe.receive.message/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.retain.as.published/client.rpt | 0 .../subscribe.retain.as.published/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../unsubscribe.after.subscribe/client.rpt | 0 .../unsubscribe.after.subscribe/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../network/client.sent.abort/client.rpt | 0 .../network/client.sent.abort/server.rpt | 0 .../network/client.sent.close/client.rpt | 0 .../network/client.sent.close/server.rpt | 0 .../network/client.sent.reset/client.rpt | 0 .../network/client.sent.reset/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../network/connect.invalid.flags/client.rpt | 0 .../network/connect.invalid.flags/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../network/connect.maximum.qos.0/client.rpt | 0 .../network/connect.maximum.qos.0/server.rpt | 0 .../connect.non.successful.connack/client.rpt | 0 .../connect.non.successful.connack/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 
.../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../connect.reject.second.connect/client.rpt | 0 .../connect.reject.second.connect/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../connect.retain.not.supported/client.rpt | 0 .../connect.retain.not.supported/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../connect.subscribe.unfragmented/client.rpt | 0 .../connect.subscribe.unfragmented/server.rpt | 0 .../connect.successful.fragmented/client.rpt | 0 .../connect.successful.fragmented/server.rpt | 0 .../network/connect.successful/client.rpt | 0 .../network/connect.successful/server.rpt | 0 .../connect.timeout.before.connect/client.rpt | 0 .../connect.timeout.before.connect/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../connect.will.invalid.will.qos/client.rpt | 0 .../connect.will.invalid.will.qos/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../streams/network/disconnect/client.rpt | 0 .../streams/network/disconnect/server.rpt | 0 .../network/ping.keep.alive/client.rpt | 0 .../network/ping.keep.alive/server.rpt | 0 .../network/ping.no.pingresp/client.rpt | 0 .../network/ping.no.pingresp/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../mqtt/streams/network/ping/client.rpt | 0 .../mqtt/streams/network/ping/server.rpt | 0 .../network/publish.empty.message/client.rpt | 0 
.../network/publish.empty.message/server.rpt | 0 .../publish.empty.retained.message/client.rpt | 0 .../publish.empty.retained.message/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../publish.multiple.messages/client.rpt | 0 .../publish.multiple.messages/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../network/publish.one.message/client.rpt | 0 .../network/publish.one.message/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../network/publish.retained/client.rpt | 0 .../network/publish.retained/server.rpt | 0 .../publish.topic.not.routed/client.rpt | 0 .../publish.topic.not.routed/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../publish.with.user.property/client.rpt | 0 .../publish.with.user.property/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../session.client.takeover/client.rpt | 0 .../session.client.takeover/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../session.exists.clean.start/client.rpt | 0 .../session.exists.clean.start/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../network/session.subscribe/client.rpt | 
0 .../network/session.subscribe/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../session.will.message.retain/client.rpt | 0 .../session.will.message.retain/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.invalid.topic.filter/client.rpt | 0 .../subscribe.invalid.topic.filter/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../network/subscribe.one.message/client.rpt | 0 .../network/subscribe.one.message/server.rpt | 0 .../subscribe.publish.no.local/client.rpt | 0 .../subscribe.publish.no.local/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.receive.message/client.rpt | 0 .../subscribe.receive.message/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.reject.no.local/client.rpt | 0 .../subscribe.reject.no.local/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../subscribe.retain.as.published/client.rpt | 0 .../subscribe.retain.as.published/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 
0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../unsubscribe.after.subscribe/client.rpt | 0 .../unsubscribe.after.subscribe/server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../client.rpt | 0 .../server.rpt | 0 .../specs/binding/mqtt/config/SchemaTest.java | 0 .../mqtt/internal/MqttFunctionsTest.java | 0 .../streams/application/ConnectionIT.java | 0 .../mqtt/streams/application/PublishIT.java | 0 .../mqtt/streams/application/SessionIT.java | 0 .../mqtt/streams/application/SubscribeIT.java | 0 .../streams/application/UnsubscribeIT.java | 0 .../mqtt/streams/network/ConnectionIT.java | 0 .../binding/mqtt/streams/network/PingIT.java | 0 .../mqtt/streams/network/PublishIT.java | 0 .../mqtt/streams/network/SessionIT.java | 0 .../mqtt/streams/network/SubscribeIT.java | 0 .../mqtt/streams/network/UnsubscribeIT.java | 0 specs/pom.xml | 12 ++++++++++ 888 files changed, 47 insertions(+), 35 deletions(-) rename {incubator/binding-mqtt-kafka.spec => runtime/binding-mqtt-kafka}/COPYRIGHT (100%) rename {incubator/binding-mqtt-kafka.spec => runtime/binding-mqtt-kafka}/LICENSE (100%) rename {incubator => runtime}/binding-mqtt-kafka/NOTICE (100%) rename {incubator/binding-mqtt-kafka.spec => runtime/binding-mqtt-kafka}/NOTICE.template (100%) rename {incubator/binding-mqtt-kafka.spec => runtime/binding-mqtt-kafka}/mvnw (100%) rename {incubator/binding-mqtt-kafka.spec => runtime/binding-mqtt-kafka}/mvnw.cmd (100%) rename {incubator => runtime}/binding-mqtt-kafka/pom.xml (98%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/config/MqttKafkaConditionConfig.java (100%) rename {incubator => 
runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/InstanceId.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBinding.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingContext.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingFactorySpi.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapter.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaRouteConfig.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaTopicsConfig.java (100%) rename {incubator => 
runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaState.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaStreamFactory.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/moditect/module-info.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/main/zilla/internal.idl (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapterTest.java (100%) rename {incubator => 
runtime}/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java (100%) rename {incubator => runtime}/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java (100%) rename {incubator/binding-mqtt.spec => runtime/binding-mqtt}/COPYRIGHT (100%) rename {incubator/binding-mqtt.spec => runtime/binding-mqtt}/LICENSE (100%) rename {incubator => runtime}/binding-mqtt/NOTICE (100%) rename {incubator/binding-mqtt.spec => runtime/binding-mqtt}/NOTICE.template (100%) rename {incubator/binding-mqtt-kafka => runtime/binding-mqtt}/mvnw (100%) rename {incubator/binding-mqtt-kafka => runtime/binding-mqtt}/mvnw.cmd (100%) rename {incubator => runtime}/binding-mqtt/pom.xml (98%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfig.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSessionConfig.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfig.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBinding.java (100%) rename {incubator => 
runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingFactorySpi.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttValidator.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttAuthorizationConfig.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfig.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java (100%) rename {incubator => 
runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttState.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttStreamFactory.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/moditect/module-info.java (100%) rename {incubator => runtime}/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi (100%) rename {incubator => runtime}/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi (100%) rename {incubator => runtime}/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi (100%) rename {incubator => runtime}/binding-mqtt/src/main/zilla/protocol.idl (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/ValidatorTest.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java (100%) rename {incubator => 
runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/ConnectionIT.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PingIT.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PublishIT.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java (100%) rename {incubator => runtime}/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/UnsubscribeIT.java (100%) rename {incubator/binding-mqtt-kafka => specs/binding-mqtt-kafka.spec}/COPYRIGHT (100%) rename {incubator/binding-mqtt-kafka => specs/binding-mqtt-kafka.spec}/LICENSE (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/NOTICE (92%) rename {incubator/binding-mqtt-kafka => specs/binding-mqtt-kafka.spec}/NOTICE.template (100%) rename {incubator/binding-mqtt.spec => specs/binding-mqtt-kafka.spec}/mvnw (100%) rename {incubator/binding-mqtt.spec => specs/binding-mqtt-kafka.spec}/mvnw.cmd (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/pom.xml (98%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/moditect/module-info.java (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.options.yaml (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.when.capabilities.with.kafka.topic.yaml (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt (100%) 
rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt (100%) rename 
{incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.user.properties.unaltered/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.user.properties.unaltered/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.wildcard/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.multi.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.multi.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.level.wildcard/server.rpt (100%) rename {incubator 
=> specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.two.single.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.two.single.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.both.exact/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.both.exact/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.both.exact/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.both.exact/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.overlapping.wildcards/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.overlapping.wildcards/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.after.subscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.after.subscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.topic.filter.single/client.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.topic.filter.single/server.rpt (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/config/SchemaTest.java (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java (100%) rename {incubator => specs}/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java (100%) rename {incubator/binding-mqtt => specs/binding-mqtt.spec}/COPYRIGHT (100%) rename {incubator/binding-mqtt => specs/binding-mqtt.spec}/LICENSE (100%) rename {incubator => specs}/binding-mqtt.spec/NOTICE (100%) rename {incubator/binding-mqtt => specs/binding-mqtt.spec}/NOTICE.template (100%) rename {incubator/binding-mqtt => specs/binding-mqtt.spec}/mvnw (100%) rename {incubator/binding-mqtt => specs/binding-mqtt.spec}/mvnw.cmd (100%) rename {incubator => specs}/binding-mqtt.spec/pom.xml (98%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/moditect/module-info.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/resources/META-INF/services/org.kaazing.k3po.lang.el.spi.FunctionMapperSpi (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.when.topic.or.sessions.yaml (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.when.topic.yaml (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.yaml (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.password.yaml (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.username.yaml (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.route.non.default.yaml (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.yaml (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt 
(100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt (100%) rename 
{incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.authentication.method/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.authentication.method/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.flags/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.flags/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.protocol.version/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.protocol.version/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.missing.client.id/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.missing.client.id/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.other.packet.before.connect/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.other.packet.before.connect/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.flag.no.password/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.flag.no.password/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.topic.alias.maximum.repeated/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.topic.alias.maximum.repeated/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.missing/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.missing/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.only/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.only/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.payload.missing/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.payload.missing/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.properties.missing/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.properties.missing/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.retain.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.retain.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.topic.missing/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.topic.missing/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt (100%) rename 
{incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.timeout.before.connect/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.timeout.before.connect/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.invalid.will.qos/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.invalid.will.qos/server.rpt (100%) rename 
{incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.1.without.will.flag/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.1.without.will.flag/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.2.without.will.flag/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.2.without.will.flag/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.retain.without.will.flag/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.retain.without.will.flag/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/server.rpt (100%) rename 
{incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt (100%) rename 
{incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/client.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/server.rpt (100%) rename {incubator => specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/UnsubscribeIT.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PingIT.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java (100%) rename {incubator => specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java (100%) rename {incubator => 
specs}/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/UnsubscribeIT.java (100%) diff --git a/cloud/docker-image/pom.xml b/cloud/docker-image/pom.xml index 94e06c2dc7..9f84442be4 100644 --- a/cloud/docker-image/pom.xml +++ b/cloud/docker-image/pom.xml @@ -73,6 +73,12 @@ ${project.version} runtime + + ${project.groupId} + binding-mqtt + ${project.version} + runtime + ${project.groupId} binding-sse @@ -115,6 +121,12 @@ ${project.version} runtime + + ${project.groupId} + binding-mqtt-kafka + ${project.version} + runtime + ${project.groupId} binding-sse-kafka @@ -335,18 +347,6 @@ ${project.version} runtime - - ${project.groupId} - binding-mqtt - ${project.version} - runtime - - - ${project.groupId} - binding-mqtt-kafka - ${project.version} - runtime - ${project.groupId} command-dump diff --git a/cloud/docker-image/src/main/docker/release/zpm.json.template b/cloud/docker-image/src/main/docker/release/zpm.json.template index 0b2bfaccbd..8f01f75f91 100644 --- a/cloud/docker-image/src/main/docker/release/zpm.json.template +++ b/cloud/docker-image/src/main/docker/release/zpm.json.template @@ -22,6 +22,8 @@ "io.aklivity.zilla:binding-grpc-kafka", "io.aklivity.zilla:binding-kafka-grpc", "io.aklivity.zilla:binding-kafka", + "io.aklivity.zilla:binding-mqtt", + "io.aklivity.zilla:binding-mqtt-kafka", "io.aklivity.zilla:binding-proxy", "io.aklivity.zilla:binding-sse", "io.aklivity.zilla:binding-sse-kafka", diff --git a/incubator/pom.xml b/incubator/pom.xml index 12a909c7b3..4ea877d03a 100644 --- a/incubator/pom.xml +++ b/incubator/pom.xml @@ -18,13 +18,9 @@ binding-amqp.spec - binding-mqtt.spec - binding-mqtt-kafka.spec exporter-otlp.spec binding-amqp - binding-mqtt - binding-mqtt-kafka command-log command-dump @@ -41,16 +37,6 @@ binding-amqp ${project.version} - - ${project.groupId} - binding-mqtt - ${project.version} - - - ${project.groupId} - binding-mqtt-kafka - ${project.version} - ${project.groupId} command-log diff --git 
a/incubator/binding-mqtt-kafka.spec/COPYRIGHT b/runtime/binding-mqtt-kafka/COPYRIGHT similarity index 100% rename from incubator/binding-mqtt-kafka.spec/COPYRIGHT rename to runtime/binding-mqtt-kafka/COPYRIGHT diff --git a/incubator/binding-mqtt-kafka.spec/LICENSE b/runtime/binding-mqtt-kafka/LICENSE similarity index 100% rename from incubator/binding-mqtt-kafka.spec/LICENSE rename to runtime/binding-mqtt-kafka/LICENSE diff --git a/incubator/binding-mqtt-kafka/NOTICE b/runtime/binding-mqtt-kafka/NOTICE similarity index 100% rename from incubator/binding-mqtt-kafka/NOTICE rename to runtime/binding-mqtt-kafka/NOTICE diff --git a/incubator/binding-mqtt-kafka.spec/NOTICE.template b/runtime/binding-mqtt-kafka/NOTICE.template similarity index 100% rename from incubator/binding-mqtt-kafka.spec/NOTICE.template rename to runtime/binding-mqtt-kafka/NOTICE.template diff --git a/incubator/binding-mqtt-kafka.spec/mvnw b/runtime/binding-mqtt-kafka/mvnw similarity index 100% rename from incubator/binding-mqtt-kafka.spec/mvnw rename to runtime/binding-mqtt-kafka/mvnw diff --git a/incubator/binding-mqtt-kafka.spec/mvnw.cmd b/runtime/binding-mqtt-kafka/mvnw.cmd similarity index 100% rename from incubator/binding-mqtt-kafka.spec/mvnw.cmd rename to runtime/binding-mqtt-kafka/mvnw.cmd diff --git a/incubator/binding-mqtt-kafka/pom.xml b/runtime/binding-mqtt-kafka/pom.xml similarity index 98% rename from incubator/binding-mqtt-kafka/pom.xml rename to runtime/binding-mqtt-kafka/pom.xml index 317e1603b2..4c9910f84c 100644 --- a/incubator/binding-mqtt-kafka/pom.xml +++ b/runtime/binding-mqtt-kafka/pom.xml @@ -7,13 +7,13 @@ 4.0.0 io.aklivity.zilla - incubator + runtime develop-SNAPSHOT ../pom.xml binding-mqtt-kafka - zilla::incubator::binding-mqtt-kafka + zilla::runtime::binding-mqtt-kafka diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/config/MqttKafkaConditionConfig.java 
b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/config/MqttKafkaConditionConfig.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/config/MqttKafkaConditionConfig.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/config/MqttKafkaConditionConfig.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/InstanceId.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/InstanceId.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/InstanceId.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/InstanceId.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBinding.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBinding.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBinding.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBinding.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingContext.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingContext.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingContext.java rename to 
runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingContext.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingFactorySpi.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingFactorySpi.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingFactorySpi.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaBindingFactorySpi.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfiguration.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaBindingConfig.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapter.java 
b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapter.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapter.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapter.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfig.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java similarity index 100% rename from 
incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapter.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaRouteConfig.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaRouteConfig.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaRouteConfig.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaRouteConfig.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaTopicsConfig.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaTopicsConfig.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaTopicsConfig.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaTopicsConfig.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java rename to 
runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaProxyFactory.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaState.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaState.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaState.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaState.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaStreamFactory.java 
b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaStreamFactory.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaStreamFactory.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaStreamFactory.java diff --git a/incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java rename to runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java diff --git a/incubator/binding-mqtt-kafka/src/main/moditect/module-info.java b/runtime/binding-mqtt-kafka/src/main/moditect/module-info.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/moditect/module-info.java rename to runtime/binding-mqtt-kafka/src/main/moditect/module-info.java diff --git a/incubator/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi b/runtime/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi rename to runtime/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi diff --git 
a/incubator/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi b/runtime/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi rename to runtime/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi diff --git a/incubator/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi b/runtime/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi rename to runtime/binding-mqtt-kafka/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi diff --git a/incubator/binding-mqtt-kafka/src/main/zilla/internal.idl b/runtime/binding-mqtt-kafka/src/main/zilla/internal.idl similarity index 100% rename from incubator/binding-mqtt-kafka/src/main/zilla/internal.idl rename to runtime/binding-mqtt-kafka/src/main/zilla/internal.idl diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java rename to 
runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/MqttKafkaConfigurationTest.java diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapterTest.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapterTest.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapterTest.java rename to runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaConditionConfigAdapterTest.java diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java rename to runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaOptionsConfigAdapterTest.java diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java rename to runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java diff --git 
a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java rename to runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java diff --git a/incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java similarity index 100% rename from incubator/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java rename to runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java diff --git a/incubator/binding-mqtt.spec/COPYRIGHT b/runtime/binding-mqtt/COPYRIGHT similarity index 100% rename from incubator/binding-mqtt.spec/COPYRIGHT rename to runtime/binding-mqtt/COPYRIGHT diff --git a/incubator/binding-mqtt.spec/LICENSE b/runtime/binding-mqtt/LICENSE similarity index 100% rename from incubator/binding-mqtt.spec/LICENSE rename to runtime/binding-mqtt/LICENSE diff --git a/incubator/binding-mqtt/NOTICE b/runtime/binding-mqtt/NOTICE similarity index 100% rename from incubator/binding-mqtt/NOTICE rename to runtime/binding-mqtt/NOTICE diff --git a/incubator/binding-mqtt.spec/NOTICE.template b/runtime/binding-mqtt/NOTICE.template similarity index 100% rename from incubator/binding-mqtt.spec/NOTICE.template rename to runtime/binding-mqtt/NOTICE.template diff --git a/incubator/binding-mqtt-kafka/mvnw 
b/runtime/binding-mqtt/mvnw similarity index 100% rename from incubator/binding-mqtt-kafka/mvnw rename to runtime/binding-mqtt/mvnw diff --git a/incubator/binding-mqtt-kafka/mvnw.cmd b/runtime/binding-mqtt/mvnw.cmd similarity index 100% rename from incubator/binding-mqtt-kafka/mvnw.cmd rename to runtime/binding-mqtt/mvnw.cmd diff --git a/incubator/binding-mqtt/pom.xml b/runtime/binding-mqtt/pom.xml similarity index 98% rename from incubator/binding-mqtt/pom.xml rename to runtime/binding-mqtt/pom.xml index 75409c60de..b349482243 100644 --- a/incubator/binding-mqtt/pom.xml +++ b/runtime/binding-mqtt/pom.xml @@ -7,13 +7,13 @@ 4.0.0 io.aklivity.zilla - incubator + runtime develop-SNAPSHOT ../pom.xml binding-mqtt - zilla::incubator::binding-mqtt + zilla::runtime::binding-mqtt diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfig.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfig.java 
b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfig.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfig.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfig.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSessionConfig.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSessionConfig.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSessionConfig.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSessionConfig.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfig.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfig.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfig.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfig.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBinding.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBinding.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBinding.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBinding.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java 
b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingContext.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingFactorySpi.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingFactorySpi.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingFactorySpi.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttBindingFactorySpi.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttReasonCodes.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttValidator.java 
b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttValidator.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttValidator.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttValidator.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttAuthorizationConfig.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttAuthorizationConfig.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttAuthorizationConfig.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttAuthorizationConfig.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttBindingConfig.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapter.java diff --git 
a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfig.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfig.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfig.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfig.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapter.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java rename to 
runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttRouteConfig.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttState.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttState.java similarity index 100% rename from incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttState.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttState.java diff --git a/incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttStreamFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttStreamFactory.java similarity index 100% rename from 
incubator/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttStreamFactory.java rename to runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttStreamFactory.java diff --git a/incubator/binding-mqtt/src/main/moditect/module-info.java b/runtime/binding-mqtt/src/main/moditect/module-info.java similarity index 100% rename from incubator/binding-mqtt/src/main/moditect/module-info.java rename to runtime/binding-mqtt/src/main/moditect/module-info.java diff --git a/incubator/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi b/runtime/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi similarity index 100% rename from incubator/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi rename to runtime/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.binding.BindingFactorySpi diff --git a/incubator/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi b/runtime/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi similarity index 100% rename from incubator/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi rename to runtime/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.ConditionConfigAdapterSpi diff --git a/incubator/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi b/runtime/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi similarity index 100% rename from 
incubator/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi rename to runtime/binding-mqtt/src/main/resources/META-INF/services/io.aklivity.zilla.runtime.engine.config.OptionsConfigAdapterSpi diff --git a/incubator/binding-mqtt/src/main/zilla/protocol.idl b/runtime/binding-mqtt/src/main/zilla/protocol.idl similarity index 100% rename from incubator/binding-mqtt/src/main/zilla/protocol.idl rename to runtime/binding-mqtt/src/main/zilla/protocol.idl diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/ValidatorTest.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/ValidatorTest.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/ValidatorTest.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/ValidatorTest.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java rename to 
runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionConfigAdapterTest.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttOptionsConfigAdapterTest.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java similarity index 100% rename from 
incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/ConnectionIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/ConnectionIT.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/ConnectionIT.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/ConnectionIT.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PingIT.java 
b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PingIT.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PingIT.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PingIT.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PublishIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PublishIT.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PublishIT.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PublishIT.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java diff --git a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java diff --git 
a/incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/UnsubscribeIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/UnsubscribeIT.java similarity index 100% rename from incubator/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/UnsubscribeIT.java rename to runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/UnsubscribeIT.java diff --git a/runtime/pom.xml b/runtime/pom.xml index 0672c167ae..f56e111660 100644 --- a/runtime/pom.xml +++ b/runtime/pom.xml @@ -28,6 +28,8 @@ binding-http-kafka binding-kafka binding-kafka-grpc + binding-mqtt + binding-mqtt-kafka binding-proxy binding-sse binding-sse-kafka @@ -103,6 +105,16 @@ binding-kafka ${project.version} + + ${project.groupId} + binding-mqtt + ${project.version} + + + ${project.groupId} + binding-mqtt-kafka + ${project.version} + ${project.groupId} binding-proxy diff --git a/incubator/binding-mqtt-kafka/COPYRIGHT b/specs/binding-mqtt-kafka.spec/COPYRIGHT similarity index 100% rename from incubator/binding-mqtt-kafka/COPYRIGHT rename to specs/binding-mqtt-kafka.spec/COPYRIGHT diff --git a/incubator/binding-mqtt-kafka/LICENSE b/specs/binding-mqtt-kafka.spec/LICENSE similarity index 100% rename from incubator/binding-mqtt-kafka/LICENSE rename to specs/binding-mqtt-kafka.spec/LICENSE diff --git a/incubator/binding-mqtt-kafka.spec/NOTICE b/specs/binding-mqtt-kafka.spec/NOTICE similarity index 92% rename from incubator/binding-mqtt-kafka.spec/NOTICE rename to specs/binding-mqtt-kafka.spec/NOTICE index 9377db206e..ba1e058bae 100644 --- a/incubator/binding-mqtt-kafka.spec/NOTICE +++ b/specs/binding-mqtt-kafka.spec/NOTICE @@ -14,8 +14,8 @@ This project includes: ICU4J under Unicode/ICU License Jakarta JSON Processing API under Eclipse Public License 2.0 or GNU General Public License, version 2 with the GNU Classpath Exception 
org.leadpony.justify under The Apache Software License, Version 2.0 - zilla::incubator::binding-mqtt.spec under The Apache Software License, Version 2.0 zilla::specs::binding-kafka.spec under The Apache Software License, Version 2.0 + zilla::specs::binding-mqtt.spec under The Apache Software License, Version 2.0 zilla::specs::binding-proxy.spec under The Apache Software License, Version 2.0 zilla::specs::engine.spec under The Apache Software License, Version 2.0 diff --git a/incubator/binding-mqtt-kafka/NOTICE.template b/specs/binding-mqtt-kafka.spec/NOTICE.template similarity index 100% rename from incubator/binding-mqtt-kafka/NOTICE.template rename to specs/binding-mqtt-kafka.spec/NOTICE.template diff --git a/incubator/binding-mqtt.spec/mvnw b/specs/binding-mqtt-kafka.spec/mvnw similarity index 100% rename from incubator/binding-mqtt.spec/mvnw rename to specs/binding-mqtt-kafka.spec/mvnw diff --git a/incubator/binding-mqtt.spec/mvnw.cmd b/specs/binding-mqtt-kafka.spec/mvnw.cmd similarity index 100% rename from incubator/binding-mqtt.spec/mvnw.cmd rename to specs/binding-mqtt-kafka.spec/mvnw.cmd diff --git a/incubator/binding-mqtt-kafka.spec/pom.xml b/specs/binding-mqtt-kafka.spec/pom.xml similarity index 98% rename from incubator/binding-mqtt-kafka.spec/pom.xml rename to specs/binding-mqtt-kafka.spec/pom.xml index 7a7aa02af1..f19bfcfb9d 100644 --- a/incubator/binding-mqtt-kafka.spec/pom.xml +++ b/specs/binding-mqtt-kafka.spec/pom.xml @@ -7,13 +7,13 @@ 4.0.0 io.aklivity.zilla - incubator + specs develop-SNAPSHOT ../pom.xml binding-mqtt-kafka.spec - zilla::incubator::binding-mqtt-kafka.spec + zilla::specs::binding-mqtt-kafka.spec diff --git a/incubator/binding-mqtt-kafka.spec/src/main/moditect/module-info.java b/specs/binding-mqtt-kafka.spec/src/main/moditect/module-info.java similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/moditect/module-info.java rename to specs/binding-mqtt-kafka.spec/src/main/moditect/module-info.java diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.options.yaml b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.options.yaml similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.options.yaml rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.options.yaml diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.when.capabilities.with.kafka.topic.yaml b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.when.capabilities.with.kafka.topic.yaml similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.when.capabilities.with.kafka.topic.yaml rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.when.capabilities.with.kafka.topic.yaml diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/config/proxy.yaml diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.abort/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.client.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.empty.message/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.clients/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.multiple.messages/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.abort/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.data/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.flush/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained.server.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.retained/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.abort/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.data/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.flush/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.server.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.distinct/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.properties.repeated/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.with.user.property/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.expire.session.state/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/client.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.abort.reconnect.non.clean.start/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.cancel.session.expiry/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/client.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.client.takeover/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.close.expire.session.state/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.max.session.expiry/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.connect.override.min.session.expiry/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.exists.clean.start/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.server.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.redirect/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.server.sent.reset/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.session.expiry.fragmented/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/client.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe.via.session.state/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.subscribe/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.after.subscribe/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.unsubscribe.via.session.state/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.cancel.delivery/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/client.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.clean.start/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.normal.disconnect/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.abort.reconnect/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.end.reconnect/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.stream.reset.reconnect/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.abort/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.data/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.client.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.deferred.filter.change.retain/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.buffer/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain.resubscribe/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.filter.change.retain/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.multiple.message/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.changed.topic.name/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/client.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.user.properties.unaltered/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.publish.no.local/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/client.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.overlapping.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.receive.message.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.abort/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retained.server.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.abort/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.flush/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.server.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.multi.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.single.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filter.two.single.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.both.exact/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.both.exact/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.topic.filters.overlapping.wildcards/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.after.subscribe/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/unsubscribe.topic.filter.single/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.abort/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.abort/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.abort/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.abort/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.reset/client.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.client.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/server.rpt 
similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.empty.message/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.clients/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.multiple.messages/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/server.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.one.message/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.abort/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.data/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/server.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.flush/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained.server.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.retained/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.abort/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.data/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/client.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.flush/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.server.sent.reset/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.distinct/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.properties.repeated/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/server.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/publish.with.user.property/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.expire.session.state/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.abort.reconnect.non.clean.start/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.client.takeover/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.close.expire.session.state/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.max.session.expiry/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.connect.override.min.session.expiry/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.exists.clean.start/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt similarity index 100% 
rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.redirect/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.server.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe.via.session.state/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.subscribe/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.after.subscribe/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.unsubscribe.via.session.state/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will.retain/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.abort.deliver.will/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.clean.start/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/client.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.normal.disconnect/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.reconnect.non.clean.start/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message.takeover.deliver.will/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.will.message/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.abort/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.data/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.client.sent.reset/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.deferred.filter.change.retain/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain.resubscribe/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/client.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.filter.change.retain/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/server.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.multiple.message/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.user.properties.unaltered/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.user.properties.unaltered/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.user.properties.unaltered/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.user.properties.unaltered/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.user.properties.unaltered/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.user.properties.unaltered/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.user.properties.unaltered/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.user.properties.unaltered/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.publish.no.local/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.overlapping.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.wildcard/client.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.receive.message.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/server.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain.as.published/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retain/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/client.rpt similarity 
index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.abort/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/server.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.flush/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.retained.server.sent.reset/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.abort/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/client.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.flush/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.server.sent.reset/server.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.multi.level.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.multi.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.multi.level.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.multi.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.multi.level.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.multi.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.multi.level.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.multi.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.level.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.level.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.level.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.level.wildcard/server.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.level.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.single.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.two.single.level.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.two.single.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.two.single.level.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.two.single.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.two.single.level.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.two.single.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.two.single.level.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filter.two.single.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.both.exact/client.rpt 
b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.both.exact/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.both.exact/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.both.exact/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.both.exact/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.both.exact/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.both.exact/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.both.exact/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt diff --git 
a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.both.exact/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.both.exact/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.both.exact/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.both.exact/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.both.exact/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.both.exact/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.both.exact/server.rpt rename to 
specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.both.exact/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.overlapping.wildcards/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.overlapping.wildcards/client.rpt similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.overlapping.wildcards/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.overlapping.wildcards/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.overlapping.wildcards/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.overlapping.wildcards/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.overlapping.wildcards/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.topic.filters.overlapping.wildcards/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.after.subscribe/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.after.subscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.after.subscribe/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.after.subscribe/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.after.subscribe/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.after.subscribe/server.rpt similarity index 
100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.after.subscribe/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.after.subscribe/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.topic.filter.single/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.topic.filter.single/client.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.topic.filter.single/client.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.topic.filter.single/client.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.topic.filter.single/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.topic.filter.single/server.rpt similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.topic.filter.single/server.rpt rename to specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/unsubscribe.topic.filter.single/server.rpt diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/config/SchemaTest.java b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/config/SchemaTest.java similarity index 100% rename from 
incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/config/SchemaTest.java rename to specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/config/SchemaTest.java diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java rename to specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java diff --git a/incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java similarity index 100% rename from incubator/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java rename to specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java diff --git a/incubator/binding-mqtt/COPYRIGHT b/specs/binding-mqtt.spec/COPYRIGHT similarity index 100% rename from incubator/binding-mqtt/COPYRIGHT rename to specs/binding-mqtt.spec/COPYRIGHT diff --git a/incubator/binding-mqtt/LICENSE b/specs/binding-mqtt.spec/LICENSE similarity index 100% rename from incubator/binding-mqtt/LICENSE rename to specs/binding-mqtt.spec/LICENSE diff --git a/incubator/binding-mqtt.spec/NOTICE b/specs/binding-mqtt.spec/NOTICE similarity index 100% rename from incubator/binding-mqtt.spec/NOTICE rename to specs/binding-mqtt.spec/NOTICE diff --git a/incubator/binding-mqtt/NOTICE.template b/specs/binding-mqtt.spec/NOTICE.template similarity index 100% rename from incubator/binding-mqtt/NOTICE.template rename to 
specs/binding-mqtt.spec/NOTICE.template diff --git a/incubator/binding-mqtt/mvnw b/specs/binding-mqtt.spec/mvnw similarity index 100% rename from incubator/binding-mqtt/mvnw rename to specs/binding-mqtt.spec/mvnw diff --git a/incubator/binding-mqtt/mvnw.cmd b/specs/binding-mqtt.spec/mvnw.cmd similarity index 100% rename from incubator/binding-mqtt/mvnw.cmd rename to specs/binding-mqtt.spec/mvnw.cmd diff --git a/incubator/binding-mqtt.spec/pom.xml b/specs/binding-mqtt.spec/pom.xml similarity index 98% rename from incubator/binding-mqtt.spec/pom.xml rename to specs/binding-mqtt.spec/pom.xml index 79b0c478fe..f624bb85ae 100644 --- a/incubator/binding-mqtt.spec/pom.xml +++ b/specs/binding-mqtt.spec/pom.xml @@ -7,13 +7,13 @@ 4.0.0 io.aklivity.zilla - incubator + specs develop-SNAPSHOT ../pom.xml binding-mqtt.spec - zilla::incubator::binding-mqtt.spec + zilla::specs::binding-mqtt.spec diff --git a/incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java b/specs/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java similarity index 100% rename from incubator/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java rename to specs/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java diff --git a/incubator/binding-mqtt.spec/src/main/moditect/module-info.java b/specs/binding-mqtt.spec/src/main/moditect/module-info.java similarity index 100% rename from incubator/binding-mqtt.spec/src/main/moditect/module-info.java rename to specs/binding-mqtt.spec/src/main/moditect/module-info.java diff --git a/incubator/binding-mqtt.spec/src/main/resources/META-INF/services/org.kaazing.k3po.lang.el.spi.FunctionMapperSpi b/specs/binding-mqtt.spec/src/main/resources/META-INF/services/org.kaazing.k3po.lang.el.spi.FunctionMapperSpi similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/resources/META-INF/services/org.kaazing.k3po.lang.el.spi.FunctionMapperSpi rename to specs/binding-mqtt.spec/src/main/resources/META-INF/services/org.kaazing.k3po.lang.el.spi.FunctionMapperSpi diff --git a/incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl b/specs/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl similarity index 100% rename from incubator/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl rename to specs/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.when.topic.or.sessions.yaml b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.when.topic.or.sessions.yaml similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.when.topic.or.sessions.yaml rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.when.topic.or.sessions.yaml diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.when.topic.yaml b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.when.topic.yaml similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.when.topic.yaml rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.when.topic.yaml diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.yaml b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.yaml similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.yaml rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/client.yaml diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.password.yaml b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.password.yaml similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.password.yaml rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.password.yaml diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.username.yaml b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.username.yaml similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.username.yaml rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.credentials.username.yaml diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.route.non.default.yaml b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.route.non.default.yaml similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.route.non.default.yaml rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.route.non.default.yaml diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.yaml b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.yaml similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.yaml rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/config/server.yaml diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/schema/mqtt.schema.patch.json diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/client.sent.abort/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.delegate.connack.properties/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.exceeded/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.maximum.qos.0/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.connack/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.non.successful.disconnect/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.reject.will.retain.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/connect.retain.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/disconnect.after.subscribe.and.publish/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.message/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.empty.retained.message/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.message.with.topic.alias/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.distinct/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.invalid.scope/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.repeated/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.messages.with.topic.alias.replaced/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.multiple.messages/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message.subscribe.unfragmented/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.one.message/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.packet.too.large/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.qos.not.supported/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.reject.retain.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.retained/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.distinct/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.properties.repeated/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/publish.with.user.property/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.abort.reconnect.non.clean.start/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.client.takeover/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.abort/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.override.session.expiry/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect.with.session.expiry/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.connect/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.exists.clean.start/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.after.connack/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.redirect.before.connack/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.server.sent.abort/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.multiple.isolated/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.publish.routing/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe.via.session.state/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.subscribe/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe.deferred/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.after.subscribe/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.unsubscribe.via.session.state/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.abort/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.normal.disconnect/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.will.message.retain/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.get.retained.as.published/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message.user.properties.unaltered/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.one.message/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.no.local/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt similarity 
index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.overlapping.wildcard/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.message/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.receive.messages.topic.alias.repeated/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reconnect.publish.no.subscription/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.shared.subscriptions.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.subscription.ids.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.retain.as.published/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.multi.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.exact/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.single.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filter.two.single.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.both.exact/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.disjoint.wildcards/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.exact/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.both.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.non.successful/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.topic.filters.overlapping.wildcards/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt similarity index 100% 
rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.after.subscribe/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.aggregated.topic.filters.both.exact/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.publish.unfragmented/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filter.single/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/unsubscribe.topic.filters.non.successful/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.abort/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/client.rpt diff 
--git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.close/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/client.sent.reset/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/client.rpt similarity index 100% 
rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.delegate.connack.properties/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.authentication.method/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.authentication.method/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.authentication.method/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.authentication.method/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.authentication.method/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.authentication.method/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.authentication.method/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.authentication.method/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.flags/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.flags/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.flags/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.flags/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.flags/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.flags/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.flags/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.flags/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.protocol.version/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.protocol.version/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.protocol.version/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.protocol.version/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.protocol.version/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.protocol.version/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.protocol.version/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.invalid.protocol.version/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.exceeded/server.rpt diff 
--git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.max.packet.size.server.ignores.exceeding.publish.packet/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.maximum.qos.0/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.connack/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.non.successful.disconnect/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.failed/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.password.authentication.successful/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.missing.client.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.missing.client.id/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.missing.client.id/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.missing.client.id/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.missing.client.id/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.missing.client.id/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.missing.client.id/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.missing.client.id/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.other.packet.before.connect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.other.packet.before.connect/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.other.packet.before.connect/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.other.packet.before.connect/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.other.packet.before.connect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.other.packet.before.connect/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.other.packet.before.connect/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.other.packet.before.connect/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.packet.too.large/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.flag.no.password/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.flag.no.password/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.flag.no.password/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.flag.no.password/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.flag.no.password/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.flag.no.password/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.flag.no.password/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.flag.no.password/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.password.no.password.flag/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.topic.alias.maximum.repeated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.topic.alias.maximum.repeated/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.topic.alias.maximum.repeated/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.topic.alias.maximum.repeated/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.topic.alias.maximum.repeated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.topic.alias.maximum.repeated/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.topic.alias.maximum.repeated/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.topic.alias.maximum.repeated/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.missing/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.missing/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.missing/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.missing/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.missing/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.missing/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.missing/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.missing/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.only/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.only/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.only/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.only/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.only/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.only/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.only/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.username.flag.only/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.payload.missing/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.payload.missing/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.payload.missing/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.payload.missing/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.payload.missing/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.payload.missing/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.payload.missing/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.payload.missing/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.properties.missing/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.properties.missing/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.properties.missing/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.properties.missing/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.properties.missing/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.properties.missing/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.properties.missing/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.properties.missing/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.retain.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.retain.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.retain.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.retain.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.retain.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.retain.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.retain.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.retain.not.supported/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.topic.missing/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.topic.missing/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.topic.missing/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.topic.missing/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.topic.missing/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.topic.missing/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.topic.missing/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.will.topic.missing/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.retain.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.assigned.client.id/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/client.rpt 
similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.server.defined.keep.alive/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.subscribe.unfragmented/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful.fragmented/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.successful/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.timeout.before.connect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.timeout.before.connect/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.timeout.before.connect/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.timeout.before.connect/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.timeout.before.connect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.timeout.before.connect/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.timeout.before.connect/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.timeout.before.connect/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.failed/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.username.authentication.successful/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.invalid.will.qos/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.invalid.will.qos/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.invalid.will.qos/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.invalid.will.qos/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.invalid.will.qos/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.invalid.will.qos/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.invalid.will.qos/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.invalid.will.qos/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.1.without.will.flag/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.1.without.will.flag/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.1.without.will.flag/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.1.without.will.flag/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.1.without.will.flag/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.1.without.will.flag/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.1.without.will.flag/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.1.without.will.flag/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.2.without.will.flag/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.2.without.will.flag/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.2.without.will.flag/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.2.without.will.flag/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.2.without.will.flag/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.2.without.will.flag/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.2.without.will.flag/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.qos.2.without.will.flag/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.retain.without.will.flag/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.retain.without.will.flag/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.retain.without.will.flag/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.retain.without.will.flag/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.retain.without.will.flag/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.retain.without.will.flag/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.retain.without.will.flag/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.will.reject.will.retain.without.will.flag/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.keep.alive.timeout/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.after.subscribe.and.publish/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.invalid.session.expiry/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.no.reasoncode.no.properties/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect.reject.invalid.fixed.header.flags/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/disconnect/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.keep.alive/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.no.pingresp/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping.server.override.keep.alive/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/ping/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.message/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.empty.retained.message/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.message.with.topic.alias/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.no.carry.over.topic.alias/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/client.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.distinct/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.invalid.scope/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.repeated/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.messages.with.topic.alias.replaced/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.unfragmented/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages.with.delay/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.multiple.messages/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message.subscribe.unfragmented/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.one.message/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.client.sent.subscription.id/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.invalid.payload.format/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.packet.too.large/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos0.with.packet.id/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos1.without.packet.id/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.qos2.without.packet.id/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.retain.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.exceeds.maximum/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.reject.topic.alias.repeated/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.retained/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/client.rpt diff 
--git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.topic.not.routed/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.distinct/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.properties.repeated/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/publish.with.user.property/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.abort.reconnect.non.clean.start/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.client.takeover/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.override.session.expiry/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.payload.fragmented/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.connect.with.session.expiry/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt similarity index 
100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.exists.clean.start/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.after.connack/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.server.redirect.before.connack/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.multiple.isolated/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.publish.routing/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe.via.session.state/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.subscribe/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe.deferred/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.unsubscribe.after.subscribe/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.disconnect.with.will.message/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.no.ping.within.keep.alive/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt 
rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.normal.disconnect/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/session.will.message.retain/server.rpt diff 
--git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.get.retained.as.published/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.fixed.header.flags/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.invalid.topic.filter/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.missing.id.receive.message/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.user.properties.unaltered/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message.with.invalid.subscription.id/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt similarity index 100% 
rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.one.message/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.publish.no.local/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.publish.retained.no.replay/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.qos0.replay.retained.no.packet.id/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt 
rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.overlapping.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.message/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.receive.messages.topic.alias.repeated/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reconnect.publish.no.subscription/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/client.rpt 
rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.malformed.subscription.options/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.packet.id/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.missing.topic.filters/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.no.local/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.shared.subscriptions.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.subscription.ids.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/client.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.topic.filter.invalid.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.reject.wildcard.subscriptions.not.supported/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.retain.as.published/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.multi.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/client.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.and.multi.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.exact/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.single.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filter.two.single.level.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt similarity index 100% rename from 
incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.both.exact/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.aggregated.exact.and.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.disjoint.wildcards/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt 
b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.wildcard/server.rpt diff --git 
a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.exact.and.wildcard/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.non.successful/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.overlapping.wildcards/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.after.subscribe/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.aggregated.topic.filters.both.exact/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.subscription/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.no.matching.topic.filter/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.publish.unfragmented/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.invalid.fixed.header.flags/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.missing.packet.id/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt rename to 
specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.reject.no.topic.filter/server.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filter.single/server.rpt 
diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/client.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/client.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/client.rpt diff --git a/incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/server.rpt similarity index 100% rename from incubator/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/server.rpt rename to specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/unsubscribe.topic.filters.non.successful/server.rpt diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/config/SchemaTest.java diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java 
similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/ConnectionIT.java diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/PublishIT.java diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java 
b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/UnsubscribeIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/UnsubscribeIT.java similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/UnsubscribeIT.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/UnsubscribeIT.java diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/ConnectionIT.java diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PingIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PingIT.java similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PingIT.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PingIT.java diff --git 
a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/PublishIT.java diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SessionIT.java diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java diff --git a/incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/UnsubscribeIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/UnsubscribeIT.java similarity index 100% rename from incubator/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/UnsubscribeIT.java rename to specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/UnsubscribeIT.java diff --git 
a/specs/pom.xml b/specs/pom.xml index 8c5a58df02..5979459274 100644 --- a/specs/pom.xml +++ b/specs/pom.xml @@ -26,12 +26,14 @@ binding-tls.spec binding-http.spec binding-grpc.spec + binding-mqtt.spec binding-sse.spec binding-ws.spec binding-kafka.spec binding-http-filesystem.spec binding-http-kafka.spec binding-grpc-kafka.spec + binding-mqtt-kafka.spec binding-sse-kafka.spec binding-kafka-grpc.spec exporter-prometheus.spec @@ -104,6 +106,16 @@ binding-http-filesystem.spec ${project.version} + + ${project.groupId} + binding-mqtt.spec + ${project.version} + + + ${project.groupId} + binding-mqtt-kafka.spec + ${project.version} + ${project.groupId} binding-sse-kafka.spec From 0cf9790b90fb06f64536df1151ba55cdddb52cf2 Mon Sep 17 00:00:00 2001 From: Attila Kreiner Date: Wed, 20 Sep 2023 20:59:12 +0200 Subject: [PATCH 097/115] Generate zilla.yaml for asyncapi.mqtt.proxy from an AsyncAPI definition (#375) --- incubator/command-config/pom.xml | 6 + .../internal/airline/ZillaConfigCommand.java | 10 +- .../AsyncApiMqttProxyConfigGenerator.java | 344 ++++++++++++++++++ .../src/main/moditect/module-info.java | 1 + .../AsyncApiMqttProxyConfigGeneratorTest.java | 78 ++++ .../mqtt/proxy/complete/asyncapi.yaml | 61 ++++ .../asyncapi/mqtt/proxy/complete/zilla.yaml | 77 ++++ .../asyncapi/mqtt/proxy/plain/asyncapi.yaml | 58 +++ .../asyncapi/mqtt/proxy/plain/zilla.yaml | 32 ++ .../asyncapi/mqtt/proxy/tls/asyncapi.yaml | 58 +++ .../asyncapi/mqtt/proxy/tls/zilla.yaml | 72 ++++ .../config/MqttConditionConfigBuilder.java | 10 + .../mqtt/config/MqttPublishConfigBuilder.java | 52 +++ .../config/MqttSubscribeConfigBuilder.java | 52 +++ 14 files changed, 909 insertions(+), 2 deletions(-) create mode 100644 incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/AsyncApiMqttProxyConfigGenerator.java create mode 100644 
incubator/command-config/src/test/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/AsyncApiMqttProxyConfigGeneratorTest.java create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/complete/asyncapi.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/complete/zilla.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/plain/asyncapi.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/plain/zilla.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/tls/asyncapi.yaml create mode 100644 incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/tls/zilla.yaml create mode 100644 runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfigBuilder.java create mode 100644 runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfigBuilder.java diff --git a/incubator/command-config/pom.xml b/incubator/command-config/pom.xml index fa369b72cc..4d8ec3933d 100644 --- a/incubator/command-config/pom.xml +++ b/incubator/command-config/pom.xml @@ -55,6 +55,12 @@ ${project.version} provided + + io.aklivity.zilla + binding-mqtt + ${project.version} + provided + io.aklivity.zilla binding-tcp diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java index 095acbd193..32a6cd245a 100644 --- 
a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java @@ -31,6 +31,7 @@ import io.aklivity.zilla.runtime.command.ZillaCommand; import io.aklivity.zilla.runtime.command.config.internal.asyncapi.http.proxy.AsyncApiHttpProxyConfigGenerator; +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.mqtt.proxy.AsyncApiMqttProxyConfigGenerator; import io.aklivity.zilla.runtime.command.config.internal.openapi.http.proxy.OpenApiHttpProxyConfigGenerator; @Command(name = "config", description = "Generate configuration file") @@ -38,13 +39,18 @@ public final class ZillaConfigCommand extends ZillaCommand { private static final Map> GENERATORS = Map.of( "openapi.http.proxy", OpenApiHttpProxyConfigGenerator::new, - "asyncapi.http.proxy", AsyncApiHttpProxyConfigGenerator::new + "asyncapi.http.proxy", AsyncApiHttpProxyConfigGenerator::new, + "asyncapi.mqtt.proxy", AsyncApiMqttProxyConfigGenerator::new ); @Option(name = {"-t", "--template"}, description = "Template name") @Required - @AllowedValues(allowedValues = {"openapi.http.proxy", "asyncapi.http.proxy"}) + @AllowedValues(allowedValues = { + "openapi.http.proxy", + "asyncapi.http.proxy", + "asyncapi.mqtt.proxy" + }) public String template; @Option(name = {"-i", "--input"}, diff --git a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/AsyncApiMqttProxyConfigGenerator.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/AsyncApiMqttProxyConfigGenerator.java new file mode 100644 index 0000000000..e8c0e7cd18 --- /dev/null +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/AsyncApiMqttProxyConfigGenerator.java @@ -0,0 +1,344 @@ +/* + * Copyright 
2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.mqtt.proxy; + +import static io.aklivity.zilla.runtime.engine.config.KindConfig.CLIENT; +import static io.aklivity.zilla.runtime.engine.config.KindConfig.SERVER; +import static java.util.Objects.requireNonNull; +import static org.agrona.LangUtil.rethrowUnchecked; + +import java.io.InputStream; +import java.net.URI; +import java.util.List; +import java.util.Map; + +import jakarta.json.Json; +import jakarta.json.JsonPatch; +import jakarta.json.JsonPatchBuilder; +import jakarta.json.bind.Jsonb; +import jakarta.json.bind.JsonbBuilder; + +import io.aklivity.zilla.runtime.binding.mqtt.config.MqttConditionConfig; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpConditionConfig; +import io.aklivity.zilla.runtime.binding.tcp.config.TcpOptionsConfig; +import io.aklivity.zilla.runtime.binding.tls.config.TlsOptionsConfig; +import io.aklivity.zilla.runtime.command.config.internal.airline.ConfigGenerator; +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.model.AsyncApi; +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.model.Channel; +import io.aklivity.zilla.runtime.command.config.internal.asyncapi.view.ServerView; +import io.aklivity.zilla.runtime.engine.config.BindingConfigBuilder; +import io.aklivity.zilla.runtime.engine.config.ConfigWriter; +import 
io.aklivity.zilla.runtime.engine.config.NamespaceConfig; +import io.aklivity.zilla.runtime.engine.config.NamespaceConfigBuilder; +import io.aklivity.zilla.runtime.vault.filesystem.config.FileSystemOptionsConfig; + +public class AsyncApiMqttProxyConfigGenerator extends ConfigGenerator +{ + private final InputStream inputStream; + + private AsyncApi asyncApi; + private int[] allPorts; + private int[] mqttPorts; + private int[] mqttsPorts; + private boolean isPlainEnabled; + private boolean isTlsEnabled; + + public AsyncApiMqttProxyConfigGenerator( + InputStream input) + { + this.inputStream = input; + } + + @Override + public String generate() + { + this.asyncApi = parseAsyncApi(inputStream); + this.allPorts = resolveAllPorts(); + this.mqttPorts = resolvePortsForScheme("mqtt"); + this.mqttsPorts = resolvePortsForScheme("mqtts"); + this.isPlainEnabled = mqttPorts != null; + this.isTlsEnabled = mqttsPorts != null; + ConfigWriter configWriter = new ConfigWriter(null); + String yaml = configWriter.write(createNamespace(), createEnvVarsPatch()); + return unquoteEnvVars(yaml, unquotedEnvVars()); + } + + private AsyncApi parseAsyncApi( + InputStream inputStream) + { + AsyncApi asyncApi = null; + try (Jsonb jsonb = JsonbBuilder.create()) + { + asyncApi = jsonb.fromJson(inputStream, AsyncApi.class); + } + catch (Exception ex) + { + rethrowUnchecked(ex); + } + return asyncApi; + } + + private int[] resolveAllPorts() + { + int[] ports = new int[asyncApi.servers.size()]; + String[] keys = asyncApi.servers.keySet().toArray(String[]::new); + for (int i = 0; i < asyncApi.servers.size(); i++) + { + ServerView server = ServerView.of(asyncApi.servers.get(keys[i])); + URI url = server.url(); + ports[i] = url.getPort(); + } + return ports; + } + + private int[] resolvePortsForScheme( + String scheme) + { + requireNonNull(scheme); + int[] ports = null; + URI url = findFirstServerUrlWithScheme(scheme); + if (url != null) + { + ports = new int[] {url.getPort()}; + } + return ports; + } + + 
private URI findFirstServerUrlWithScheme( + String scheme) + { + requireNonNull(scheme); + URI result = null; + for (String key : asyncApi.servers.keySet()) + { + ServerView server = ServerView.of(asyncApi.servers.get(key)); + if (scheme.equals(server.url().getScheme())) + { + result = server.url(); + break; + } + } + return result; + } + + private NamespaceConfig createNamespace() + { + return NamespaceConfig.builder() + .name("example") + .binding() + .name("tcp_server0") + .type("tcp") + .kind(SERVER) + .options(TcpOptionsConfig::builder) + .host("0.0.0.0") + .ports(allPorts) + .build() + .inject(this::injectPlainTcpRoute) + .inject(this::injectTlsTcpRoute) + .build() + .inject(this::injectTlsServer) + .binding() + .name("mqtt_server0") + .type("mqtt") + .kind(SERVER) + .inject(this::injectMqttServerRoutes) + .build() + .binding() + .name("mqtt_client0") + .type("mqtt") + .kind(CLIENT) + .exit(isTlsEnabled ? "tls_client0" : "tcp_client0") + .build() + .inject(this::injectTlsClient) + .binding() + .name("tcp_client0") + .type("tcp") + .kind(CLIENT) + .options(TcpOptionsConfig::builder) + .host("") // env + .ports(new int[]{0}) // env + .build() + .build() + .inject(this::injectVaults) + .build(); + } + + private BindingConfigBuilder> injectPlainTcpRoute( + BindingConfigBuilder> binding) + { + if (isPlainEnabled) + { + binding + .route() + .when(TcpConditionConfig::builder) + .ports(mqttPorts) + .build() + .exit("mqtt_server0") + .build(); + } + return binding; + } + + private BindingConfigBuilder> injectTlsTcpRoute( + BindingConfigBuilder> binding) + { + if (isTlsEnabled) + { + binding + .route() + .when(TcpConditionConfig::builder) + .ports(mqttsPorts) + .build() + .exit("tls_server0") + .build(); + } + return binding; + } + + private NamespaceConfigBuilder injectTlsServer( + NamespaceConfigBuilder namespace) + { + if (isTlsEnabled) + { + namespace + .binding() + .name("tls_server0") + .type("tls") + .kind(SERVER) + .options(TlsOptionsConfig::builder) + 
.keys(List.of("")) // env + .sni(List.of("")) // env + .alpn(List.of("")) // env + .build() + .vault("server") + .exit("mqtt_server0") + .build(); + } + return namespace; + } + + private BindingConfigBuilder> injectMqttServerRoutes( + BindingConfigBuilder> binding) + { + for (Map.Entry entry : asyncApi.channels.entrySet()) + { + String topic = entry.getValue().address.replaceAll("\\{[^}]+\\}", "*"); + binding + .route() + .when(MqttConditionConfig::builder) + .publish() + .topic(topic) + .build() + .build() + .when(MqttConditionConfig::builder) + .subscribe() + .topic(topic) + .build() + .build() + .exit("mqtt_client0") + .build(); + } + return binding; + } + + private NamespaceConfigBuilder injectTlsClient( + NamespaceConfigBuilder namespace) + { + if (isTlsEnabled) + { + namespace + .binding() + .name("tls_client0") + .type("tls") + .kind(CLIENT) + .options(TlsOptionsConfig::builder) + .trust(List.of("")) // env + .sni(List.of("")) // env + .alpn(List.of("")) // env + .trustcacerts(true) + .build() + .vault("client") + .exit("tcp_client0") + .build(); + } + return namespace; + } + + private NamespaceConfigBuilder injectVaults( + NamespaceConfigBuilder namespace) + { + if (isTlsEnabled) + { + namespace + .vault() + .name("client") + .type("filesystem") + .options(FileSystemOptionsConfig::builder) + .trust() + .store("") // env + .type("") // env + .password("") // env + .build() + .build() + .build() + .vault() + .name("server") + .type("filesystem") + .options(FileSystemOptionsConfig::builder) + .keys() + .store("") // env + .type("") // env + .password("") //env + .build() + .build() + .build(); + } + return namespace; + } + + private JsonPatch createEnvVarsPatch() + { + JsonPatchBuilder patch = Json.createPatchBuilder(); + patch.replace("/bindings/tcp_client0/options/host", "${{env.TCP_CLIENT_HOST}}"); + patch.replace("/bindings/tcp_client0/options/port", "${{env.TCP_CLIENT_PORT}}"); + + if (isTlsEnabled) + { + // tls_server0 binding + 
patch.replace("/bindings/tls_server0/options/keys/0", "${{env.TLS_SERVER_KEY}}"); + patch.replace("/bindings/tls_server0/options/sni/0", "${{env.TLS_SERVER_SNI}}"); + patch.replace("/bindings/tls_server0/options/alpn/0", "${{env.TLS_SERVER_ALPN}}"); + // tls_client0 binding + patch.replace("/bindings/tls_client0/options/trust/0", "${{env.TLS_CLIENT_TRUST}}"); + patch.replace("/bindings/tls_client0/options/sni/0", "${{env.TLS_CLIENT_SNI}}"); + patch.replace("/bindings/tls_client0/options/alpn/0", "${{env.TLS_CLIENT_ALPN}}"); + // client vault + patch.replace("/vaults/client/options/trust/store", "${{env.TRUSTSTORE_PATH}}"); + patch.replace("/vaults/client/options/trust/type", "${{env.TRUSTSTORE_TYPE}}"); + patch.replace("/vaults/client/options/trust/password", "${{env.TRUSTSTORE_PASSWORD}}"); + // server vault + patch.replace("/vaults/server/options/keys/store", "${{env.KEYSTORE_PATH}}"); + patch.replace("/vaults/server/options/keys/type", "${{env.KEYSTORE_TYPE}}"); + patch.replace("/vaults/server/options/keys/password", "${{env.KEYSTORE_PASSWORD}}"); + } + + return patch.build(); + } + + private List unquotedEnvVars() + { + return List.of("TCP_CLIENT_PORT"); + } +} diff --git a/incubator/command-config/src/main/moditect/module-info.java b/incubator/command-config/src/main/moditect/module-info.java index 014b16646a..ec140c6d79 100644 --- a/incubator/command-config/src/main/moditect/module-info.java +++ b/incubator/command-config/src/main/moditect/module-info.java @@ -17,6 +17,7 @@ requires io.aklivity.zilla.runtime.command; requires io.aklivity.zilla.runtime.engine; requires io.aklivity.zilla.runtime.binding.http; + requires io.aklivity.zilla.runtime.binding.mqtt; requires io.aklivity.zilla.runtime.binding.tcp; requires io.aklivity.zilla.runtime.binding.tls; requires io.aklivity.zilla.runtime.guard.jwt; diff --git a/incubator/command-config/src/test/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/AsyncApiMqttProxyConfigGeneratorTest.java 
b/incubator/command-config/src/test/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/AsyncApiMqttProxyConfigGeneratorTest.java new file mode 100644 index 0000000000..56ef3aaf4e --- /dev/null +++ b/incubator/command-config/src/test/java/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/AsyncApiMqttProxyConfigGeneratorTest.java @@ -0,0 +1,78 @@ +/* + * Copyright 2021-2023 Aklivity Inc + * + * Licensed under the Aklivity Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * https://www.aklivity.io/aklivity-community-license/ + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.aklivity.zilla.runtime.command.config.internal.asyncapi.mqtt.proxy; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; + +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import org.junit.jupiter.api.Test; + +public class AsyncApiMqttProxyConfigGeneratorTest +{ + @Test + public void shouldGeneratePlainConfig() throws Exception + { + try (InputStream inputStream = getClass().getResourceAsStream("plain/asyncapi.yaml")) + { + // GIVEN + String expectedResult = Files.readString(Path.of(getClass().getResource("plain/zilla.yaml").getFile())); + AsyncApiMqttProxyConfigGenerator generator = new AsyncApiMqttProxyConfigGenerator(inputStream); + + // WHEN + String result = generator.generate(); + + // THEN + assertThat(result, equalTo(expectedResult)); + } + } + + @Test + public void shouldGenerateTlsConfig() throws Exception + { + try (InputStream inputStream = 
getClass().getResourceAsStream("tls/asyncapi.yaml")) + { + // GIVEN + String expectedResult = Files.readString(Path.of(getClass().getResource("tls/zilla.yaml").getFile())); + AsyncApiMqttProxyConfigGenerator generator = new AsyncApiMqttProxyConfigGenerator(inputStream); + + // WHEN + String result = generator.generate(); + + // THEN + assertThat(result, equalTo(expectedResult)); + } + } + + @Test + public void shouldGenerateCompleteConfig() throws Exception + { + try (InputStream inputStream = getClass().getResourceAsStream("complete/asyncapi.yaml")) + { + // GIVEN + String expectedResult = Files.readString(Path.of(getClass().getResource("complete/zilla.yaml").getFile())); + AsyncApiMqttProxyConfigGenerator generator = new AsyncApiMqttProxyConfigGenerator(inputStream); + + // WHEN + String result = generator.generate(); + + // THEN + assertThat(result, equalTo(expectedResult)); + } + } +} diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/complete/asyncapi.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/complete/asyncapi.yaml new file mode 100644 index 0000000000..b3bbaceddf --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/complete/asyncapi.yaml @@ -0,0 +1,61 @@ +asyncapi: 3.0.0 +info: + title: Zilla MQTT Proxy + version: 1.0.0 + license: + name: Aklivity Community License +servers: + secure: + host: mqtts://localhost:8883 + protocol: secure-mqtt + plain: + host: mqtt://localhost:1883 + protocol: mqtt +defaultContentType: application/json + +channels: + smartylighting: + address: "smartylighting/streetlights/1/0/event/{streetlightId}/lighting/measured" + title: MQTT Topic to produce & consume topic. 
+ parameters: + streetlightId: + $ref: '#/components/parameters/streetlightId' + messages: + items: + $ref: '#/components/messages/item' + +operations: + sendEvents: + action: send + channel: + $ref: '#/channels/smartylighting' + + receiveEvents: + action: receive + channel: + $ref: '#/channels/smartylighting' + +components: + parameters: + streetlightId: + description: Street Light ID + location: $message.header#/id + messages: + item: + name: event + title: An event + headers: + type: object + properties: + idempotency-key: + description: Unique identifier for a given event + type: string + id: + description: Street Light ID + type: string + contentType: application/json + payload: + type: object + properties: + item: + $ref: "#/components/schemas/item" diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/complete/zilla.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/complete/zilla.yaml new file mode 100644 index 0000000000..619880a249 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/complete/zilla.yaml @@ -0,0 +1,77 @@ +name: example +bindings: + tcp_server0: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: + - 1883 + - 8883 + routes: + - exit: mqtt_server0 + when: + - port: 1883 + - exit: tls_server0 + when: + - port: 8883 + tls_server0: + vault: server + type: tls + kind: server + options: + keys: + - "${{env.TLS_SERVER_KEY}}" + sni: + - "${{env.TLS_SERVER_SNI}}" + alpn: + - "${{env.TLS_SERVER_ALPN}}" + exit: mqtt_server0 + mqtt_server0: + type: mqtt + kind: server + routes: + - exit: mqtt_client0 + when: + - publish: + - topic: smartylighting/streetlights/1/0/event/*/lighting/measured + - subscribe: + - topic: smartylighting/streetlights/1/0/event/*/lighting/measured + mqtt_client0: + type: mqtt + kind: client + exit: 
tls_client0 + tls_client0: + vault: client + type: tls + kind: client + options: + trust: + - "${{env.TLS_CLIENT_TRUST}}" + trustcacerts: true + sni: + - "${{env.TLS_CLIENT_SNI}}" + alpn: + - "${{env.TLS_CLIENT_ALPN}}" + exit: tcp_client0 + tcp_client0: + type: tcp + kind: client + options: + host: "${{env.TCP_CLIENT_HOST}}" + port: ${{env.TCP_CLIENT_PORT}} +vaults: + client: + type: filesystem + options: + trust: + store: "${{env.TRUSTSTORE_PATH}}" + type: "${{env.TRUSTSTORE_TYPE}}" + password: "${{env.TRUSTSTORE_PASSWORD}}" + server: + type: filesystem + options: + keys: + store: "${{env.KEYSTORE_PATH}}" + type: "${{env.KEYSTORE_TYPE}}" + password: "${{env.KEYSTORE_PASSWORD}}" diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/plain/asyncapi.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/plain/asyncapi.yaml new file mode 100644 index 0000000000..57b918e490 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/plain/asyncapi.yaml @@ -0,0 +1,58 @@ +asyncapi: 3.0.0 +info: + title: Zilla MQTT Proxy + version: 1.0.0 + license: + name: Aklivity Community License +servers: + plain: + host: mqtt://localhost:1883 + protocol: mqtt +defaultContentType: application/json + +channels: + smartylighting: + address: "smartylighting/streetlights/1/0/event/{streetlightId}/lighting/measured" + title: MQTT Topic to produce & consume topic. 
+ parameters: + streetlightId: + $ref: '#/components/parameters/streetlightId' + messages: + items: + $ref: '#/components/messages/item' + +operations: + sendEvents: + action: send + channel: + $ref: '#/channels/smartylighting' + + receiveEvents: + action: receive + channel: + $ref: '#/channels/smartylighting' + +components: + parameters: + streetlightId: + description: Street Light ID + location: $message.header#/id + messages: + item: + name: event + title: An event + headers: + type: object + properties: + idempotency-key: + description: Unique identifier for a given event + type: string + id: + description: Street Light ID + type: string + contentType: application/json + payload: + type: object + properties: + item: + $ref: "#/components/schemas/item" diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/plain/zilla.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/plain/zilla.yaml new file mode 100644 index 0000000000..9ed3ffa917 --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/plain/zilla.yaml @@ -0,0 +1,32 @@ +name: example +bindings: + tcp_server0: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: 1883 + routes: + - exit: mqtt_server0 + when: + - port: 1883 + mqtt_server0: + type: mqtt + kind: server + routes: + - exit: mqtt_client0 + when: + - publish: + - topic: smartylighting/streetlights/1/0/event/*/lighting/measured + - subscribe: + - topic: smartylighting/streetlights/1/0/event/*/lighting/measured + mqtt_client0: + type: mqtt + kind: client + exit: tcp_client0 + tcp_client0: + type: tcp + kind: client + options: + host: "${{env.TCP_CLIENT_HOST}}" + port: ${{env.TCP_CLIENT_PORT}} diff --git 
a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/tls/asyncapi.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/tls/asyncapi.yaml new file mode 100644 index 0000000000..499d8926ff --- /dev/null +++ b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/tls/asyncapi.yaml @@ -0,0 +1,58 @@ +asyncapi: 3.0.0 +info: + title: Zilla MQTT Proxy + version: 1.0.0 + license: + name: Aklivity Community License +servers: + secure: + host: mqtts://localhost:8883 + protocol: secure-mqtt +defaultContentType: application/json + +channels: + smartylighting: + address: "smartylighting/streetlights/1/0/event/{streetlightId}/lighting/measured" + title: MQTT Topic to produce & consume topic. + parameters: + streetlightId: + $ref: '#/components/parameters/streetlightId' + messages: + items: + $ref: '#/components/messages/item' + +operations: + sendEvents: + action: send + channel: + $ref: '#/channels/smartylighting' + + receiveEvents: + action: receive + channel: + $ref: '#/channels/smartylighting' + +components: + parameters: + streetlightId: + description: Street Light ID + location: $message.header#/id + messages: + item: + name: event + title: An event + headers: + type: object + properties: + idempotency-key: + description: Unique identifier for a given event + type: string + id: + description: Street Light ID + type: string + contentType: application/json + payload: + type: object + properties: + item: + $ref: "#/components/schemas/item" diff --git a/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/tls/zilla.yaml b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/tls/zilla.yaml new file mode 100644 index 0000000000..f5046ffb0b --- /dev/null +++ 
b/incubator/command-config/src/test/resources/io/aklivity/zilla/runtime/command/config/internal/asyncapi/mqtt/proxy/tls/zilla.yaml @@ -0,0 +1,72 @@ +name: example +bindings: + tcp_server0: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: 8883 + routes: + - exit: tls_server0 + when: + - port: 8883 + tls_server0: + vault: server + type: tls + kind: server + options: + keys: + - "${{env.TLS_SERVER_KEY}}" + sni: + - "${{env.TLS_SERVER_SNI}}" + alpn: + - "${{env.TLS_SERVER_ALPN}}" + exit: mqtt_server0 + mqtt_server0: + type: mqtt + kind: server + routes: + - exit: mqtt_client0 + when: + - publish: + - topic: smartylighting/streetlights/1/0/event/*/lighting/measured + - subscribe: + - topic: smartylighting/streetlights/1/0/event/*/lighting/measured + mqtt_client0: + type: mqtt + kind: client + exit: tls_client0 + tls_client0: + vault: client + type: tls + kind: client + options: + trust: + - "${{env.TLS_CLIENT_TRUST}}" + trustcacerts: true + sni: + - "${{env.TLS_CLIENT_SNI}}" + alpn: + - "${{env.TLS_CLIENT_ALPN}}" + exit: tcp_client0 + tcp_client0: + type: tcp + kind: client + options: + host: "${{env.TCP_CLIENT_HOST}}" + port: ${{env.TCP_CLIENT_PORT}} +vaults: + client: + type: filesystem + options: + trust: + store: "${{env.TRUSTSTORE_PATH}}" + type: "${{env.TRUSTSTORE_TYPE}}" + password: "${{env.TRUSTSTORE_PASSWORD}}" + server: + type: filesystem + options: + keys: + store: "${{env.KEYSTORE_PATH}}" + type: "${{env.KEYSTORE_TYPE}}" + password: "${{env.KEYSTORE_PASSWORD}}" diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java index 00276b09ec..551e2230ba 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java +++ 
b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttConditionConfigBuilder.java @@ -60,6 +60,11 @@ public MqttConditionConfigBuilder subscribe( return this; } + public MqttSubscribeConfigBuilder> subscribe() + { + return new MqttSubscribeConfigBuilder<>(this::subscribe); + } + public MqttConditionConfigBuilder publish( MqttPublishConfig publish) { @@ -67,6 +72,11 @@ public MqttConditionConfigBuilder publish( return this; } + public MqttPublishConfigBuilder> publish() + { + return new MqttPublishConfigBuilder<>(this::publish); + } + @Override public T build() { diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfigBuilder.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfigBuilder.java new file mode 100644 index 0000000000..0c74378307 --- /dev/null +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttPublishConfigBuilder.java @@ -0,0 +1,52 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.mqtt.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public class MqttPublishConfigBuilder extends ConfigBuilder> +{ + private final Function mapper; + + private String topic; + + public MqttPublishConfigBuilder(Function mapper) + { + this.mapper = mapper; + } + + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + + public MqttPublishConfigBuilder topic( + String topic) + { + this.topic = topic; + return this; + } + + @Override + public T build() + { + return mapper.apply(new MqttPublishConfig(topic)); + } +} diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfigBuilder.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfigBuilder.java new file mode 100644 index 0000000000..f413e8d17c --- /dev/null +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/config/MqttSubscribeConfigBuilder.java @@ -0,0 +1,52 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.mqtt.config; + +import java.util.function.Function; + +import io.aklivity.zilla.runtime.engine.config.ConfigBuilder; + +public class MqttSubscribeConfigBuilder extends ConfigBuilder> +{ + private final Function mapper; + + private String topic; + + public MqttSubscribeConfigBuilder(Function mapper) + { + this.mapper = mapper; + } + + @Override + @SuppressWarnings("unchecked") + protected Class> thisType() + { + return (Class>) getClass(); + } + + public MqttSubscribeConfigBuilder topic( + String topic) + { + this.topic = topic; + return this; + } + + @Override + public T build() + { + return mapper.apply(new MqttSubscribeConfig(topic)); + } +} From e43fb117daa49d2a95802dd6a765ab020da4daf2 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Wed, 20 Sep 2023 21:31:27 +0200 Subject: [PATCH 098/115] Add affinity to mqtt server and client binding (#436) --- .../internal/config/MqttConditionMatcher.java | 18 +-- .../internal/stream/MqttClientFactory.java | 131 +++++++++--------- .../internal/stream/MqttServerFactory.java | 6 +- .../internal/stream/client/SubscribeIT.java | 9 ++ .../client.rpt | 47 +++++++ .../server.rpt | 23 +++ .../client.rpt | 4 + .../client.rpt | 4 + .../mqtt/streams/application/SubscribeIT.java | 9 ++ 9 files changed, 178 insertions(+), 73 deletions(-) create mode 100644 specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.invalid.affinity/client.rpt create mode 100644 specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.invalid.affinity/server.rpt diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java index 1c18272037..eb98ea423a 100644 --- 
a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/config/MqttConditionMatcher.java @@ -33,21 +33,21 @@ public MqttConditionMatcher( MqttConditionConfig condition) { this.sessionMatchers = - condition.sessions != null ? + condition.sessions != null && !condition.sessions.isEmpty() ? asWildcardMatcher(condition.sessions.stream().map(s -> s.clientId).collect(Collectors.toList())) : null; this.subscribeMatchers = - condition.subscribes != null ? + condition.subscribes != null && !condition.subscribes.isEmpty() ? asTopicMatcher(condition.subscribes.stream().map(s -> s.topic).collect(Collectors.toList())) : null; this.publishMatchers = - condition.publishes != null ? + condition.publishes != null && !condition.publishes.isEmpty() ? asTopicMatcher(condition.publishes.stream().map(s -> s.topic).collect(Collectors.toList())) : null; } public boolean matchesSession( String clientId) { - boolean match = false; - if (sessionMatchers != null) + boolean match = sessionMatchers == null; + if (!match) { for (Matcher matcher : sessionMatchers) { @@ -64,8 +64,8 @@ public boolean matchesSession( public boolean matchesSubscribe( String topic) { - boolean match = false; - if (subscribeMatchers != null) + boolean match = subscribeMatchers == null; + if (!match) { for (Matcher matcher : subscribeMatchers) { @@ -82,8 +82,8 @@ public boolean matchesSubscribe( public boolean matchesPublish( String topic) { - boolean match = false; - if (publishMatchers != null) + boolean match = publishMatchers == null; + if (!match) { for (Matcher matcher : publishMatchers) { diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java index 8f8021ea0f..90bdc5f46f 100644 --- 
a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java @@ -294,7 +294,7 @@ public final class MqttClientFactory implements MqttStreamFactory private final MqttClientDecoder decodeUnknownType = this::decodeUnknownType; private final Map decodersByPacketType; - private final Int2ObjectHashMap clients; + private final Long2ObjectHashMap clients; private int maximumPacketSize; @@ -376,7 +376,7 @@ public MqttClientFactory( this.maximumPacketSize = writeBuffer.capacity(); this.encodeBudgetMax = bufferPool.slotCapacity(); this.utf8Decoder = StandardCharsets.UTF_8.newDecoder(); - this.clients = new Int2ObjectHashMap<>(); + this.clients = new Long2ObjectHashMap<>(); } @Override @@ -407,6 +407,7 @@ public MessageConsumer newStream( final long routedId = begin.routedId(); final long initialId = begin.streamId(); final long authorization = begin.authorization(); + final long affinity = begin.affinity(); MqttBindingConfig binding = bindings.get(routedId); @@ -425,27 +426,20 @@ public MessageConsumer newStream( assert typeId == mqttTypeId; final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); - String16FW clientId; - MqttClient client; - switch (mqttBeginEx.kind()) + final int kind = mqttBeginEx.kind(); + + MqttClient client = resolveClient(routedId, resolvedId, supplyInitialId.applyAsLong(resolvedId), affinity, kind); + switch (kind) { case MqttBeginExFW.KIND_SESSION: - clientId = mqttBeginEx.session().clientId(); - client = resolveClient(routedId, resolvedId, supplyInitialId.applyAsLong(resolvedId), clientId); client.sessionStream = new MqttSessionStream(client, sender, originId, routedId, initialId); newStream = client.sessionStream::onSession; break; case MqttBeginExFW.KIND_PUBLISH: - final MqttPublishBeginExFW publishBeginEx = mqttBeginEx.publish(); - clientId = 
publishBeginEx.clientId(); - client = resolveClient(routedId, resolvedId, supplyInitialId.applyAsLong(resolvedId), clientId); MqttPublishStream publishStream = new MqttPublishStream(client, sender, originId, routedId, initialId); newStream = publishStream::onPublish; break; case MqttBeginExFW.KIND_SUBSCRIBE: - final MqttSubscribeBeginExFW subscribeBeginEx = mqttBeginEx.subscribe(); - clientId = subscribeBeginEx.clientId(); - client = resolveClient(routedId, resolvedId, supplyInitialId.applyAsLong(resolvedId), clientId); MqttSubscribeStream subscribeStream = new MqttSubscribeStream(client, sender, originId, routedId, initialId); newStream = subscribeStream::onSubscribe; break; @@ -459,17 +453,11 @@ private MqttClient resolveClient( long routedId, long resolvedId, long initialId, - String16FW clientId) - { - final int clientKey = clientKey(clientId.asString()); - return clients.computeIfAbsent(clientKey, - s -> new MqttClient(routedId, resolvedId, initialId, maximumPacketSize)); - } - - private int clientKey( - String client) + long affinity, + int kind) { - return Math.abs(client.hashCode()); + return kind == MqttBeginExFW.KIND_SESSION ? 
clients.computeIfAbsent(affinity, + s -> new MqttClient(routedId, resolvedId, initialId, maximumPacketSize)) : clients.get(affinity); } private MessageConsumer newStream( @@ -1044,6 +1032,7 @@ private boolean invalidUtf8( catch (CharacterCodingException ex) { invalid = true; + utf8Decoder.reset(); } return invalid; } @@ -2861,7 +2850,6 @@ private void onSessionBegin( initialMax = maximum; state = MqttState.openingInitial(state); - client.doNetworkBegin(traceId, authorization, affinity); final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); @@ -3227,36 +3215,45 @@ private void onSubscribeBegin( final long affinity = begin.affinity(); final OctetsFW extension = begin.extension(); - assert acknowledge <= sequence; - assert sequence >= initialSeq; - assert acknowledge >= initialAck; + onSubscribeBegin: + { + if (client == null) + { + doSubscribeReset(traceId, authorization); + break onSubscribeBegin; + } - initialSeq = sequence; - initialAck = acknowledge; - initialMax = maximum; - state = MqttState.openingInitial(state); + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; - final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); + initialSeq = sequence; + initialAck = acknowledge; + initialMax = maximum; + state = MqttState.openingInitial(state); - assert mqttBeginEx.kind() == MqttBeginExFW.KIND_SUBSCRIBE; - final MqttSubscribeBeginExFW mqttSubscribeBeginEx = mqttBeginEx.subscribe(); + final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); - final Array32FW filters = mqttSubscribeBeginEx.filters(); + assert mqttBeginEx.kind() == MqttBeginExFW.KIND_SUBSCRIBE; + final MqttSubscribeBeginExFW mqttSubscribeBeginEx = mqttBeginEx.subscribe(); - filters.forEach(filter -> - { - Subscription subscription = new Subscription(); - subscription.id = (int) filter.subscriptionId(); - subscription.filter = filter.pattern().asString(); - subscription.flags = filter.flags(); - 
subscription.qos = filter.qos(); - subscriptions.add(subscription); - }); - final int qos = subscriptions.get(0).qos; - client.subscribeStreams.put(qos, this); + final Array32FW filters = mqttSubscribeBeginEx.filters(); - doSubscribeBegin(traceId, authorization, affinity); - doSubscribeWindow(traceId, authorization, client.encodeSlotOffset, encodeBudgetMax); + filters.forEach(filter -> + { + Subscription subscription = new Subscription(); + subscription.id = (int) filter.subscriptionId(); + subscription.filter = filter.pattern().asString(); + subscription.flags = filter.flags(); + subscription.qos = filter.qos(); + subscriptions.add(subscription); + }); + final int qos = subscriptions.get(0).qos; + client.subscribeStreams.put(qos, this); + + doSubscribeBegin(traceId, authorization, affinity); + doSubscribeWindow(traceId, authorization, client.encodeSlotOffset, encodeBudgetMax); + } } private void onSubscribeFlush( @@ -3566,6 +3563,7 @@ private void onPublish( private void onPublishBegin( BeginFW begin) { + final long sequence = begin.sequence(); final long acknowledge = begin.acknowledge(); final int maximum = begin.maximum(); @@ -3574,25 +3572,34 @@ private void onPublishBegin( final long affinity = begin.affinity(); final OctetsFW extension = begin.extension(); - assert acknowledge <= sequence; - assert sequence >= initialSeq; - assert acknowledge >= initialAck; + onPublishBegin: + { + if (client == null) + { + doPublishReset(traceId, authorization); + break onPublishBegin; + } - initialSeq = sequence; - initialAck = acknowledge; - initialMax = maximum; - state = MqttState.openingInitial(state); + assert acknowledge <= sequence; + assert sequence >= initialSeq; + assert acknowledge >= initialAck; - final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); + initialSeq = sequence; + initialAck = acknowledge; + initialMax = maximum; + state = MqttState.openingInitial(state); + + final MqttBeginExFW mqttBeginEx = extension.get(mqttBeginExRO::tryWrap); 
- assert mqttBeginEx.kind() == MqttBeginExFW.KIND_PUBLISH; - final MqttPublishBeginExFW mqttPublishBeginEx = mqttBeginEx.publish(); + assert mqttBeginEx.kind() == MqttBeginExFW.KIND_PUBLISH; + final MqttPublishBeginExFW mqttPublishBeginEx = mqttBeginEx.publish(); - this.topic = mqttPublishBeginEx.topic().asString(); - client.publishStreams.add(this); + this.topic = mqttPublishBeginEx.topic().asString(); + client.publishStreams.add(this); - doPublishBegin(traceId, authorization, affinity); - doPublishWindow(traceId, authorization, client.encodeSlotOffset, encodeBudgetMax); + doPublishBegin(traceId, authorization, affinity); + doPublishWindow(traceId, authorization, client.encodeSlotOffset, encodeBudgetMax); + } } private void onPublishData( diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 80ae2afed8..765bbb2053 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -971,6 +971,7 @@ private boolean invalidUtf8( catch (CharacterCodingException ex) { invalid = true; + utf8Decoder.reset(); } return invalid; } @@ -1199,7 +1200,6 @@ private final class MqttServer private final long routedId; private final long initialId; private final long replyId; - private final long affinity; private final long encodeBudgetId; private final Int2ObjectHashMap publishStreams; @@ -1215,6 +1215,7 @@ private final class MqttServer private String16FW clientId; + private long affinity; private long decodeSeq; private long decodeAck; private int decodeMax; @@ -1282,7 +1283,6 @@ private MqttServer( this.routedId = routedId; this.initialId = initialId; this.replyId = replyId; - this.affinity = affinity; 
this.encodeBudgetId = budgetId; this.decoder = decodeInitialType; this.publishStreams = new Int2ObjectHashMap<>(); @@ -1341,8 +1341,10 @@ private void onNetworkBegin( { final long traceId = begin.traceId(); final long authorization = begin.authorization(); + final long streamId = begin.streamId(); state = MqttState.openingInitial(state); + affinity = streamId; doNetworkBegin(traceId, authorization); doNetworkWindow(traceId, authorization, 0, 0L, 0, bufferPool.slotCapacity()); diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java index 585ec9a9a1..2941c395fb 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java @@ -312,4 +312,13 @@ public void shouldReceiveReconnectNoSubscription() throws Exception { k3po.finish(); } + + @Test + @Configuration("client.yaml") + @Specification({ + "${app}/subscribe.publish.invalid.affinity/client"}) + public void shouldAbortSubscribeAndPublishInvalidAffinity() throws Exception + { + k3po.finish(); + } } diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.invalid.affinity/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.invalid.affinity/client.rpt new file mode 100644 index 0000000000..e8cb3e6508 --- /dev/null +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.invalid.affinity/client.rpt @@ -0,0 +1,47 @@ +# +# Copyright 2021-2023 Aklivity Inc. 
+# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:affinity 0xc1 + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .subscribe() + .clientId("client") + .filter("sensor/one", 1, "AT_MOST_ONCE", "NO_LOCAL") + .build() + .build()} + +connect aborted + + +connect "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:affinity 0xc1 + +write zilla:begin.ext ${mqtt:beginEx() + .typeId(zilla:id("mqtt")) + .publish() + .clientId("client") + .topic("sensor/one") + .flags("RETAIN") + .build() + .build()} + +connect aborted diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.invalid.affinity/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.invalid.affinity/server.rpt new file mode 100644 index 0000000000..e896a47016 --- /dev/null +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.publish.invalid.affinity/server.rpt @@ -0,0 +1,23 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/app0" + option zilla:window 8192 + option zilla:transmission "duplex" + +rejected + +rejected \ No newline at end of file diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt index 53da0e66b6..ea318306af 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.publish.retained.no.replay/client.rpt @@ -17,6 +17,7 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" + option zilla:affinity 0xc1 write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) @@ -47,6 +48,7 @@ connect await RECEIVED_SESSION_STATE "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" + option zilla:affinity 0xc1 write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) @@ -75,6 +77,7 @@ connect await SENT_RETAIN_DATA "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" + option zilla:affinity 0xc2 write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) @@ -121,6 +124,7 @@ connect await RECEIVED_SESSION_STATE2 "zilla://streams/app0" option zilla:window 8192 
option zilla:transmission "duplex" + option zilla:affinity 0xc2 write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt index 3d6019dfe0..0c193373c8 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/subscribe.qos0.replay.retained.no.packet.id/client.rpt @@ -17,6 +17,7 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" + option zilla:affinity 0xc1 write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) @@ -48,6 +49,7 @@ connect await RECEIVED_SESSION_STATE "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" + option zilla:affinity 0xc1 write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) @@ -75,6 +77,7 @@ connect await SENT_RETAIN_DATA "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" + option zilla:affinity 0xc2 write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) @@ -121,6 +124,7 @@ connect await RECEIVED_SESSION_STATE2 "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "duplex" + option zilla:affinity 0xc2 write zilla:begin.ext ${mqtt:beginEx() .typeId(zilla:id("mqtt")) diff --git a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java index 3ffc0a1d49..89c231aafb 100644 --- 
a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java +++ b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SubscribeIT.java @@ -251,4 +251,13 @@ public void shouldReceiveReconnectNoSubscription() throws Exception { k3po.finish(); } + + @Test + @Specification({ + "${app}/subscribe.publish.invalid.affinity/client", + "${app}/subscribe.publish.invalid.affinity/server"}) + public void shouldAbortSubscribeAndPublishInvalidAffinity() throws Exception + { + k3po.finish(); + } } From 2318388f2d5fdd7d824afe544b311e30fae87c3e Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 21 Sep 2023 14:00:57 -0700 Subject: [PATCH 099/115] Ensure socket channel has finished connecting before attempting to read (#441) --- .../binding/tcp/internal/stream/TcpClientFactory.java | 6 ++++++ .../zilla/runtime/binding/tcp/internal/stream/TcpState.java | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpClientFactory.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpClientFactory.java index 2e2bb6c244..d75b87d5e7 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpClientFactory.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpClientFactory.java @@ -330,8 +330,14 @@ private int onNetReadable( ((Buffer) readByteBuffer).position(0); ((Buffer) readByteBuffer).limit(limit); + read: try { + if (!TcpState.opened(state)) + { + break read; + } + final int bytesRead = net.read(readByteBuffer); if (bytesRead == -1) diff --git a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpState.java b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpState.java index 
239f656862..cf52ffe211 100644 --- a/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpState.java +++ b/runtime/binding-tcp/src/main/java/io/aklivity/zilla/runtime/binding/tcp/internal/stream/TcpState.java @@ -122,6 +122,12 @@ static boolean replyClosed( return (state & REPLY_CLOSED) != 0; } + static boolean opened( + int state) + { + return initialOpened(state) && replyOpened(state); + } + private TcpState() { // utility From 5215c754b57adbc61dcfbbfb3d911a3cc767a0b7 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Fri, 22 Sep 2023 01:29:39 +0200 Subject: [PATCH 100/115] Mqtt subscription handling bugfix (#439) --- .../mqtt/internal/MqttConfiguration.java | 59 +++++++++- .../internal/stream/MqttClientFactory.java | 9 +- .../internal/stream/MqttServerFactory.java | 107 ++++++++---------- .../mqtt/internal/MqttConfigurationTest.java | 6 +- .../internal/stream/server/SessionIT.java | 3 - .../internal/stream/server/SubscribeIT.java | 20 +++- .../connect.reject.second.connect/client.rpt | 5 +- .../connect.reject.second.connect/server.rpt | 7 +- .../client.rpt | 59 ++++++++++ .../server.rpt | 60 ++++++++++ .../mqtt/streams/network/SubscribeIT.java | 9 ++ 11 files changed, 264 insertions(+), 80 deletions(-) create mode 100644 specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact.no.subscription.id/client.rpt create mode 100644 specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact.no.subscription.id/server.rpt diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java index a6f0fc1357..cf23bf107f 100644 --- 
a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfiguration.java @@ -15,7 +15,14 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.internal; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.util.Random; import java.util.concurrent.TimeUnit; +import java.util.function.IntSupplier; + +import org.agrona.LangUtil; import io.aklivity.zilla.runtime.engine.Configuration; @@ -36,7 +43,8 @@ public class MqttConfiguration extends Configuration public static final BooleanPropertyDef NO_LOCAL; public static final IntPropertyDef SESSION_EXPIRY_GRACE_PERIOD; public static final PropertyDef CLIENT_ID; - public static final PropertyDef SERVER_REFERENCE; + public static final PropertyDef SUBSCRIPTION_ID; + public static final int GENERATED_SUBSCRIPTION_ID_MASK = 0x70; static { @@ -56,7 +64,8 @@ public class MqttConfiguration extends Configuration NO_LOCAL = config.property("no.local", true); SESSION_EXPIRY_GRACE_PERIOD = config.property("session.expiry.grace.period", 30); CLIENT_ID = config.property("client.id"); - SERVER_REFERENCE = config.property("server.reference"); + SUBSCRIPTION_ID = config.property(IntSupplier.class, "subscription.id", + MqttConfiguration::decodeIntSupplier, MqttConfiguration::defaultSubscriptionId); MQTT_CONFIG = config; } @@ -121,8 +130,50 @@ public String clientId() return CLIENT_ID.get(this); } - public String serverReference() + + public IntSupplier subscriptionId() + { + return SUBSCRIPTION_ID.get(this); + } + + private static IntSupplier decodeIntSupplier( + String fullyQualifiedMethodName) + { + IntSupplier supplier = null; + + try + { + MethodType signature = MethodType.methodType(int.class); + String[] parts = fullyQualifiedMethodName.split("::"); + Class ownerClass = Class.forName(parts[0]); + String methodName = 
parts[1]; + MethodHandle method = MethodHandles.publicLookup().findStatic(ownerClass, methodName, signature); + supplier = () -> + { + int value = 0; + try + { + value = (int) method.invoke(); + } + catch (Throwable ex) + { + LangUtil.rethrowUnchecked(ex); + } + + return value; + }; + } + catch (Throwable ex) + { + LangUtil.rethrowUnchecked(ex); + } + + return supplier; + } + + private static int defaultSubscriptionId() { - return SERVER_REFERENCE.get(this); + int randomValue = Math.abs(new Random().nextInt()); + return randomValue | GENERATED_SUBSCRIPTION_ID_MASK; } } diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java index 90bdc5f46f..518c04eccc 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java @@ -15,6 +15,7 @@ */ package io.aklivity.zilla.runtime.binding.mqtt.internal.stream; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.GENERATED_SUBSCRIPTION_ID_MASK; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.BAD_AUTHENTICATION_METHOD; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.MALFORMED_PACKET; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttReasonCodes.NORMAL_DISCONNECT; @@ -2309,7 +2310,7 @@ private void doEncodeSubscribe( MqttPropertyFW mqttProperty; final int subscriptionId = subscriptions.get(0).id; - if (subscriptionId != 0) + if (subscriptionId != 0 && !generatedSubscriptionId(subscriptionId)) { mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity()) .subscriptionId(i -> i.set(subscriptionId)) @@ -2351,6 +2352,12 @@ private void 
doEncodeSubscribe( doNetworkData(traceId, authorization, 0L, subscribe); } + private boolean generatedSubscriptionId( + int subscriptionId) + { + return (subscriptionId & GENERATED_SUBSCRIPTION_ID_MASK) == GENERATED_SUBSCRIPTION_ID_MASK; + } + private void doEncodeUnsubscribe( long traceId, long authorization, diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 765bbb2053..79b84c4968 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -86,6 +86,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.IntSupplier; import java.util.function.LongFunction; import java.util.function.LongSupplier; import java.util.function.LongUnaryOperator; @@ -304,7 +305,7 @@ public final class MqttServerFactory implements MqttStreamFactory private final MqttServerDecoder decodeUnknownType = this::decodeUnknownType; private final Map decodersByPacketType; - private final String serverRef; + private final IntSupplier supplySubscriptionId; private int maximumPacketSize; { @@ -402,10 +403,10 @@ public MqttServerFactory( this.encodeBudgetMax = bufferPool.slotCapacity(); this.validator = new MqttValidator(); this.utf8Decoder = StandardCharsets.UTF_8.newDecoder(); + this.supplySubscriptionId = config.subscriptionId(); final Optional clientId = Optional.ofNullable(config.clientId()).map(String16FW::new); this.supplyClientId = clientId.isPresent() ? 
clientId::get : () -> new String16FW(UUID.randomUUID().toString()); - this.serverRef = config.serverReference(); } @Override @@ -825,6 +826,27 @@ private int decodeConnect( break decode; } + if (server.connected) + { + reasonCode = PROTOCOL_ERROR; + break decode; + } + + if (mqttConnect.clientId().length() > MAXIMUM_CLIENT_ID_LENGTH) + { + reasonCode = CLIENT_IDENTIFIER_NOT_VALID; + break decode; + } + + final MqttPropertiesFW properties = mqttConnect.properties(); + + reasonCode = server.decodeConnectProperties(properties); + + if (reasonCode != SUCCESS) + { + break decode; + } + progress = server.onDecodeConnect(traceId, authorization, buffer, progress, limit, mqttConnect); final int decodedLength = progress - offset - 2; @@ -1632,58 +1654,28 @@ private int onDecodeConnect( { final String16FW clientIdentifier = connect.clientId(); this.assignedClientId = false; - byte reasonCode; - decode: - { - if (connected) - { - reasonCode = PROTOCOL_ERROR; - break decode; - } - final int length = clientIdentifier.length(); + final int length = clientIdentifier.length(); - if (length == 0) - { - this.clientId = supplyClientId.get(); - this.assignedClientId = true; - } - else if (length > MAXIMUM_CLIENT_ID_LENGTH) - { - reasonCode = CLIENT_IDENTIFIER_NOT_VALID; - break decode; - } - else - { - this.clientId = new String16FW(clientIdentifier.asString()); - } - - final MqttPropertiesFW properties = connect.properties(); - - reasonCode = decodeConnectProperties(properties); - - if (reasonCode != SUCCESS) - { - break decode; - } - - keepAlive = (short) Math.min(Math.max(connect.keepAlive(), keepAliveMinimum), keepAliveMaximum); - serverDefinedKeepAlive = keepAlive != connect.keepAlive(); - keepAliveTimeout = Math.round(TimeUnit.SECONDS.toMillis(keepAlive) * 1.5); - connectFlags = connect.flags(); - doSignalKeepAliveTimeout(); - doCancelConnectTimeout(); + if (length == 0) + { + this.clientId = supplyClientId.get(); + this.assignedClientId = true; } - - progress = connect.limit(); 
- if (reasonCode != SUCCESS) + else { - doCancelConnectTimeout(); - doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, null); - doNetworkEnd(traceId, authorization); - decoder = decodeIgnoreAll; + this.clientId = new String16FW(clientIdentifier.asString()); } + keepAlive = (short) Math.min(Math.max(connect.keepAlive(), keepAliveMinimum), keepAliveMaximum); + serverDefinedKeepAlive = keepAlive != connect.keepAlive(); + keepAliveTimeout = Math.round(TimeUnit.SECONDS.toMillis(keepAlive) * 1.5); + connectFlags = connect.flags(); + doSignalKeepAliveTimeout(); + doCancelConnectTimeout(); + + progress = connect.limit(); + return progress; } @@ -1980,7 +1972,6 @@ private void onDecodeSubscribe( decode: { - for (int decodeProgress = decodeOffset; decodeProgress < decodeLimit; ) { final MqttSubscribePayloadFW mqttSubscribePayload = @@ -2037,11 +2028,15 @@ private void onDecodeSubscribe( break; } + if (!containsSubscriptionId) + { + subscriptionId = supplySubscriptionId.getAsInt(); + } + Subscription subscription = new Subscription(); subscription.id = subscriptionId; subscription.filter = filter; subscription.flags = flags; - //TODO: what if we don't have a subscriptionId subscribePacketIds.put(subscriptionId, packetId); newSubscriptions.add(subscription); @@ -3381,7 +3376,7 @@ private void onSessionData( newState.add(subscription); }); List currentSubscriptions = sessionStream.subscriptions(); - if (newState.size() > currentSubscriptions.size()) + if (newState.size() >= currentSubscriptions.size()) { List newSubscriptions = newState.stream() .filter(s -> !currentSubscriptions.contains(s)) @@ -4589,16 +4584,6 @@ private static int decodeConnectFlags( return reasonCode; } - private static DirectBuffer copyBuffer( - DirectBuffer buffer, - int index, - int length) - { - UnsafeBuffer copy = new UnsafeBuffer(new byte[length]); - copy.putBytes(0, buffer, index, length); - return copy; - } - private final class MqttConnectPayload { private byte reasonCode 
= SUCCESS; diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java index f31a8246d9..d1669a24d6 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/MqttConfigurationTest.java @@ -23,9 +23,9 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.NO_LOCAL; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.RETAIN_AVAILABLE; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SERVER_REFERENCE; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SESSION_EXPIRY_GRACE_PERIOD; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SHARED_SUBSCRIPTION; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SUBSCRIPTION_ID; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.SUBSCRIPTION_IDENTIFIERS; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.TOPIC_ALIAS_MAXIMUM; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.WILDCARD_SUBSCRIPTION; @@ -48,7 +48,7 @@ public class MqttConfigurationTest public static final String NO_LOCAL_NAME = "zilla.binding.mqtt.no.local"; public static final String SESSION_EXPIRY_GRACE_PERIOD_NAME = "zilla.binding.mqtt.session.expiry.grace.period"; public static final String CLIENT_ID_NAME = "zilla.binding.mqtt.client.id"; - public static final String SERVER_REFERENCE_NAME = "zilla.binding.mqtt.server.reference"; + public static final String SUBSCRIPTION_ID_NAME = 
"zilla.binding.mqtt.subscription.id"; @Test public void shouldVerifyConstants() @@ -66,6 +66,6 @@ public void shouldVerifyConstants() assertEquals(NO_LOCAL.name(), NO_LOCAL_NAME); assertEquals(SESSION_EXPIRY_GRACE_PERIOD.name(), SESSION_EXPIRY_GRACE_PERIOD_NAME); assertEquals(CLIENT_ID.name(), CLIENT_ID_NAME); - assertEquals(SERVER_REFERENCE.name(), SERVER_REFERENCE_NAME); + assertEquals(SUBSCRIPTION_ID.name(), SUBSCRIPTION_ID_NAME); } } diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java index aaceb67114..6495045ddb 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java @@ -17,7 +17,6 @@ import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.KEEP_ALIVE_MINIMUM_NAME; -import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SERVER_REFERENCE_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -224,7 +223,6 @@ public void shouldClientTakeOverSession() throws Exception @Specification({ "${net}/session.server.redirect.after.connack/client", "${app}/session.server.redirect.after.connack/server"}) - @Configure(name = SERVER_REFERENCE_NAME, value = "mqtt-1.example.com:1883") public void shouldRedirectAfterConnack() throws Exception { k3po.finish(); @@ -235,7 +233,6 @@ public void shouldRedirectAfterConnack() throws Exception @Specification({ "${net}/session.server.redirect.before.connack/client", 
"${app}/session.server.redirect.before.connack/server"}) - @Configure(name = SERVER_REFERENCE_NAME, value = "mqtt-1.example.com:1883") public void shouldRedirectBeforeConnack() throws Exception { k3po.finish(); diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java index 5f2b0a061a..dd27bfb33e 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java @@ -14,9 +14,9 @@ * under the License. */ package io.aklivity.zilla.runtime.binding.mqtt.internal.stream.server; - import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfiguration.PUBLISH_TIMEOUT; import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.NO_LOCAL_NAME; +import static io.aklivity.zilla.runtime.binding.mqtt.internal.MqttConfigurationTest.SUBSCRIPTION_ID_NAME; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; @@ -48,6 +48,8 @@ public class SubscribeIT .counterValuesBufferCapacity(8192) .configure(PUBLISH_TIMEOUT, 1L) .configure(ENGINE_DRAIN_ON_CLOSE, false) + .configure(SUBSCRIPTION_ID_NAME, + "io.aklivity.zilla.runtime.binding.mqtt.internal.stream.server.SubscribeIT::supplySubscriptionId") .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") .external("app0") .clean(); @@ -165,6 +167,16 @@ public void shouldFilterIsolatedBothExact() throws Exception k3po.finish(); } + @Test + @Configuration("server.yaml") + @Specification({ + "${net}/subscribe.topic.filters.isolated.both.exact.no.subscription.id/client", + 
"${app}/subscribe.topic.filters.isolated.both.exact/server"}) + public void shouldFilterIsolatedBothExactNoSubscriptionId() throws Exception + { + k3po.finish(); + } + @Test @Configuration("server.yaml") @Specification({ @@ -365,4 +377,10 @@ public void shouldFilterNonSuccessful() throws Exception { k3po.finish(); } + + private static int subscriptionId = 0; + public static int supplySubscriptionId() + { + return ++subscriptionId; + } } diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt index b5bcdf864d..4c1ac6cfc0 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/client.rpt @@ -43,9 +43,8 @@ write [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 0x06] "client" # client id -read [0x20 0x03] # CONNACK - [0x00] # flags = none +read [0xe0 0x02] # DISCONNECT [0x82] # reason = protocol error [0x00] # properties = none -read closed +read closed diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt index b67de5c22f..8a49f9d9ab 100644 --- a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/connect.reject.second.connect/server.rpt @@ -44,9 +44,8 @@ read [0x10 0x13] # CONNECT [0x00] # properties = none [0x00 
0x06] "client" # client id -write [0x20 0x03] # CONNACK - [0x00] # flags = none - [0x82] # reason = protocol error - [0x00] # properties = none +write [0xe0 0x02] # DISCONNECT + [0x82] # reason = protocol error + [0x00] # properties = none write close diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact.no.subscription.id/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact.no.subscription.id/client.rpt new file mode 100644 index 0000000000..6c387a6a09 --- /dev/null +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact.no.subscription.id/client.rpt @@ -0,0 +1,59 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +connect "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +connected + +write [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +read [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 + +write [0x82 0x10] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x00] # properties + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +read [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +write [0x82 0x10] # SUBSCRIBE + [0x00 0x02] # packet id = 2 + [0x00] # properties + [0x00 0x0a] "sensor/two" # topic filter + [0x20] # options = at-most-once + +read [0x90 0x04] # SUBACK + [0x00 0x02] # packet id = 2 + [0x00] # properties = none + [0x00] # reason code diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact.no.subscription.id/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact.no.subscription.id/server.rpt new file mode 100644 index 0000000000..e823ccd680 --- /dev/null +++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/subscribe.topic.filters.isolated.both.exact.no.subscription.id/server.rpt @@ -0,0 +1,60 @@ +# +# Copyright 2021-2023 Aklivity Inc. +# +# Aklivity licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +accept "zilla://streams/net0" + option zilla:window 8192 + option zilla:transmission "duplex" + option zilla:byteorder "network" + +accepted +connected + +read [0x10 0x18] # CONNECT + [0x00 0x04] "MQTT" # protocol name + [0x05] # protocol version + [0x02] # flags = clean start + [0x00 0x3c] # keep alive = 60s + [0x05] # properties + [0x27] 66560 # maximum packet size = 66560 + [0x00 0x06] "client" # client id + +write [0x20 0x08] # CONNACK + [0x00] # flags = none + [0x00] # reason code + [0x05] # properties = none + [0x27] 66560 # maximum packet size = 66560 + +read [0x82 0x10] # SUBSCRIBE + [0x00 0x01] # packet id = 1 + [0x00] # properties + [0x00 0x0a] "sensor/one" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x04] # SUBACK + [0x00 0x01] # packet id = 1 + [0x00] # properties = none + [0x00] # reason code + +read [0x82 0x10] # SUBSCRIBE + [0x00 0x02] # packet id = 2 + [0x00] # properties + [0x00 0x0a] "sensor/two" # topic filter + [0x20] # options = at-most-once + +write [0x90 0x04] # SUBACK + [0x00 0x02] # packet id = 2 + [0x00] # properties = none + [0x00] # reason codes diff --git a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java index db43f319ee..cd7a26499c 100644 --- a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java +++ 
b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/SubscribeIT.java @@ -182,6 +182,15 @@ public void shouldFilterIsolatedBothExact() throws Exception k3po.finish(); } + @Test + @Specification({ + "${net}/subscribe.topic.filters.isolated.both.exact.no.subscription.id/client", + "${net}/subscribe.topic.filters.isolated.both.exact.no.subscription.id/server"}) + public void shouldFilterIsolatedBothExactNoSubscriptionId() throws Exception + { + k3po.finish(); + } + @Test @Specification({ "${net}/subscribe.topic.filters.isolated.both.wildcard/client", From 94bb9f6d57dd2ec70ca75f96bf0e69941f9076e8 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 21 Sep 2023 19:36:30 -0700 Subject: [PATCH 101/115] Remove unused engine configuration (#442) --- .../internal/stream/server/AdvisoryIT.java | 4 +- .../internal/stream/server/AmqpServerIT.java | 4 +- .../echo/internal/streams/ServerIT.java | 4 +- .../fan/internal/streams/ServerIT.java | 4 +- .../internal/stream/FileSystemServerIT.java | 4 +- .../stream/GrpcKafkaFetchProxyIT.java | 4 +- .../stream/GrpcKafkaProduceProxyIT.java | 4 +- .../streams/client/BidiStreamRpcIT.java | 4 +- .../streams/client/ClientStreamRpcIT.java | 4 +- .../streams/client/ServerStreamRpcIT.java | 4 +- .../internal/streams/client/UnaryRpcIT.java | 4 +- .../streams/server/BidiStreamRpcIT.java | 4 +- .../streams/server/ClientStreamRpcIT.java | 4 +- .../streams/server/RejectedRpcIT.java | 4 +- .../streams/server/ServerStreamRpcIT.java | 4 +- .../internal/streams/server/UnaryRpcIT.java | 4 +- .../stream/HttpFileSystemProxyIT.java | 4 +- .../internal/stream/HttpKafkaProxyIT.java | 4 +- .../streams/rfc7230/client/AdvisoryIT.java | 4 +- .../rfc7230/client/ArchitectureIT.java | 4 +- .../client/ConnectionManagementIT.java | 4 +- .../ConnectionManagementPoolSize1IT.java | 4 +- .../streams/rfc7230/client/FlowControlIT.java | 4 +- .../rfc7230/client/FlowControlLimitsIT.java | 4 +- .../rfc7230/client/MessageFormatIT.java | 4 
+- .../rfc7230/client/TransferCodingsIT.java | 4 +- .../rfc7230/server/AccessControlIT.java | 4 +- .../streams/rfc7230/server/AdvisoryIT.java | 4 +- .../rfc7230/server/ArchitectureIT.java | 4 +- .../rfc7230/server/AuthorizationIT.java | 4 +- .../server/ConnectionManagementIT.java | 4 +- .../streams/rfc7230/server/FlowControlIT.java | 4 +- .../rfc7230/server/FlowControlLimitsIT.java | 4 +- .../rfc7230/server/MessageFormatIT.java | 4 +- .../rfc7230/server/TransferCodingsIT.java | 4 +- .../streams/rfc7540/client/AbortIT.java | 4 +- .../streams/rfc7540/client/ConfigIT.java | 4 +- .../client/ConnectionManagementIT.java | 4 +- .../streams/rfc7540/client/FlowControlIT.java | 4 +- .../rfc7540/client/MessageFormatIT.java | 4 +- .../streams/rfc7540/client/StartingIT.java | 4 +- .../streams/rfc7540/server/AbortIT.java | 4 +- .../rfc7540/server/AccessControlIT.java | 4 +- .../rfc7540/server/AuthorizationIT.java | 4 +- .../streams/rfc7540/server/ConfigIT.java | 4 +- .../server/ConnectionManagementIT.java | 4 +- .../streams/rfc7540/server/FlowControlIT.java | 4 +- .../rfc7540/server/MessageFormatIT.java | 4 +- .../streams/rfc7540/server/SettingsIT.java | 4 +- .../streams/rfc7540/server/StartingIT.java | 4 +- .../stream/KafkaGrpcRemoteServerIT.java | 4 +- .../internal/stream/CacheBootstrapIT.java | 4 +- .../internal/stream/CacheConsumerIT.java | 4 +- .../internal/stream/CacheDescribeIT.java | 4 +- .../kafka/internal/stream/CacheFetchIT.java | 4 +- .../kafka/internal/stream/CacheGroupIT.java | 4 +- .../kafka/internal/stream/CacheMergedIT.java | 4 +- .../kafka/internal/stream/CacheMetaIT.java | 4 +- .../internal/stream/CacheOffsetFetchIT.java | 4 +- .../kafka/internal/stream/CacheProduceIT.java | 4 +- .../internal/stream/ClientDescribeIT.java | 4 +- .../internal/stream/ClientDescribeSaslIT.java | 4 +- .../kafka/internal/stream/ClientFetchIT.java | 4 +- .../internal/stream/ClientFetchSaslIT.java | 4 +- .../kafka/internal/stream/ClientGroupIT.java | 4 +- 
.../internal/stream/ClientGroupSaslIT.java | 4 +- .../kafka/internal/stream/ClientMergedIT.java | 4 +- .../kafka/internal/stream/ClientMetaIT.java | 4 +- .../internal/stream/ClientMetaSaslIT.java | 4 +- .../internal/stream/ClientOffsetFetchIT.java | 4 +- .../internal/stream/ClientProduceIT.java | 4 +- .../internal/stream/ClientProduceSaslIT.java | 4 +- .../stream/MqttKafkaPublishProxyIT.java | 4 +- .../stream/MqttKafkaSessionProxyIT.java | 4 +- .../stream/MqttKafkaSubscribeProxyIT.java | 4 +- .../internal/stream/client/ConnectionIT.java | 4 +- .../mqtt/internal/stream/client/PingIT.java | 4 +- .../internal/stream/client/PublishIT.java | 4 +- .../internal/stream/client/SubscribeIT.java | 4 +- .../internal/stream/client/UnsubscribeIT.java | 4 +- .../internal/stream/server/ConnectionIT.java | 4 +- .../mqtt/internal/stream/server/PingIT.java | 4 +- .../internal/stream/server/PublishIT.java | 4 +- .../internal/stream/server/SessionIT.java | 4 +- .../internal/stream/server/SubscribeIT.java | 4 +- .../internal/stream/server/UnsubscribeIT.java | 4 +- .../proxy/internal/streams/ProxyClientIT.java | 4 +- .../proxy/internal/streams/ProxyServerIT.java | 4 +- .../internal/stream/SseKafkaProxyIT.java | 4 +- .../internal/streams/client/AdvisoryIT.java | 4 +- .../streams/client/ByteOrderMarkIT.java | 4 +- .../sse/internal/streams/client/DataIT.java | 4 +- .../internal/streams/client/EndOfLineIT.java | 4 +- .../sse/internal/streams/client/ErrorIT.java | 4 +- .../internal/streams/client/HandshakeIT.java | 4 +- .../internal/streams/client/ReconnectIT.java | 4 +- .../sse/internal/streams/client/TypeIT.java | 4 +- .../internal/streams/server/AdvisoryIT.java | 4 +- .../internal/streams/server/ChallengeIT.java | 4 +- .../sse/internal/streams/server/DataIT.java | 4 +- .../sse/internal/streams/server/ErrorIT.java | 4 +- .../internal/streams/server/HandshakeIT.java | 4 +- .../sse/internal/streams/server/IdIT.java | 5 +- .../internal/streams/server/ReconnectIT.java | 4 +- 
.../internal/streams/server/TimestampIT.java | 4 +- .../sse/internal/streams/server/TypeIT.java | 4 +- .../streams/ClientIOExceptionFromReadIT.java | 4 +- .../streams/ClientIOExceptionFromWriteIT.java | 4 +- .../tcp/internal/streams/ClientIT.java | 4 +- .../tcp/internal/streams/ClientLimitsIT.java | 4 +- .../streams/ClientPartialWriteIT.java | 4 +- .../streams/ClientPartialWriteLimitsIT.java | 4 +- .../streams/ClientResetAndAbortIT.java | 4 +- .../tcp/internal/streams/ClientRoutingIT.java | 4 +- .../streams/ServerIOExceptionFromReadIT.java | 4 +- .../streams/ServerIOExceptionFromWriteIT.java | 4 +- .../tcp/internal/streams/ServerIT.java | 4 +- .../tcp/internal/streams/ServerLimitsIT.java | 5 +- .../streams/ServerPartialWriteIT.java | 4 +- .../streams/ServerPartialWriteLimitsIT.java | 6 +- .../streams/ServerResetAndAbortIT.java | 4 +- .../tcp/internal/streams/ServerRoutingIT.java | 4 +- .../internal/streams/ClientFragmentedIT.java | 4 +- .../tls/internal/streams/ClientIT.java | 4 +- .../binding/tls/internal/streams/ProxyIT.java | 4 +- .../internal/streams/ServerFragmentedIT.java | 4 +- .../tls/internal/streams/ServerIT.java | 4 +- .../internal/streams/client/AdvisoryIT.java | 4 +- .../streams/client/BaseFramingIT.java | 4 +- .../streams/client/FlowControlIT.java | 4 +- .../streams/client/OpeningHandshakeIT.java | 4 +- .../internal/streams/server/AdvisoryIT.java | 4 +- .../streams/server/BaseFramingIT.java | 4 +- .../streams/server/ClosingHandshakeIT.java | 4 +- .../ws/internal/streams/server/ControlIT.java | 4 +- .../streams/server/FlowControlIT.java | 4 +- .../streams/server/FragmentationIT.java | 4 +- .../streams/server/OpeningHandshakeIT.java | 4 +- .../runtime/engine/EngineConfiguration.java | 71 ++++--------------- .../internal/registry/DispatchAgent.java | 6 +- .../runtime/engine/internal/EngineIT.java | 4 +- .../engine/internal/ReconfigureFileIT.java | 4 +- .../engine/internal/ReconfigureHttpIT.java | 4 +- .../zilla/runtime/engine/test/EngineRule.java | 26 
++----- .../guard/jwt/internal/JwtGuardIT.java | 4 +- .../internal/FileSystemVaultIT.java | 4 +- 146 files changed, 165 insertions(+), 514 deletions(-) diff --git a/incubator/binding-amqp/src/test/java/io/aklivity/zilla/runtime/binding/amqp/internal/stream/server/AdvisoryIT.java b/incubator/binding-amqp/src/test/java/io/aklivity/zilla/runtime/binding/amqp/internal/stream/server/AdvisoryIT.java index f67fc69e9f..c38b02dd34 100644 --- a/incubator/binding-amqp/src/test/java/io/aklivity/zilla/runtime/binding/amqp/internal/stream/server/AdvisoryIT.java +++ b/incubator/binding-amqp/src/test/java/io/aklivity/zilla/runtime/binding/amqp/internal/stream/server/AdvisoryIT.java @@ -42,9 +42,7 @@ public class AdvisoryIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(2048) - .responseBufferCapacity(2048) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(AMQP_CONTAINER_ID, "server") .configure(ENGINE_DRAIN_ON_CLOSE, false) .configure(AMQP_CLOSE_EXCHANGE_TIMEOUT, 500) diff --git a/incubator/binding-amqp/src/test/java/io/aklivity/zilla/runtime/binding/amqp/internal/stream/server/AmqpServerIT.java b/incubator/binding-amqp/src/test/java/io/aklivity/zilla/runtime/binding/amqp/internal/stream/server/AmqpServerIT.java index 75eb20bfae..2f3e2218b8 100644 --- a/incubator/binding-amqp/src/test/java/io/aklivity/zilla/runtime/binding/amqp/internal/stream/server/AmqpServerIT.java +++ b/incubator/binding-amqp/src/test/java/io/aklivity/zilla/runtime/binding/amqp/internal/stream/server/AmqpServerIT.java @@ -49,9 +49,7 @@ public class AmqpServerIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(AMQP_CONTAINER_ID, "server") .configure(ENGINE_DRAIN_ON_CLOSE, false) .configure(AMQP_CLOSE_EXCHANGE_TIMEOUT, 500) diff --git 
a/runtime/binding-echo/src/test/java/io/aklivity/zilla/runtime/binding/echo/internal/streams/ServerIT.java b/runtime/binding-echo/src/test/java/io/aklivity/zilla/runtime/binding/echo/internal/streams/ServerIT.java index 00d2961f6d..cbfe28fb19 100644 --- a/runtime/binding-echo/src/test/java/io/aklivity/zilla/runtime/binding/echo/internal/streams/ServerIT.java +++ b/runtime/binding-echo/src/test/java/io/aklivity/zilla/runtime/binding/echo/internal/streams/ServerIT.java @@ -38,9 +38,7 @@ public class ServerIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/echo/config") .clean(); diff --git a/runtime/binding-fan/src/test/java/io/aklivity/zilla/runtime/binding/fan/internal/streams/ServerIT.java b/runtime/binding-fan/src/test/java/io/aklivity/zilla/runtime/binding/fan/internal/streams/ServerIT.java index b851a936b0..b327a0d7d1 100644 --- a/runtime/binding-fan/src/test/java/io/aklivity/zilla/runtime/binding/fan/internal/streams/ServerIT.java +++ b/runtime/binding-fan/src/test/java/io/aklivity/zilla/runtime/binding/fan/internal/streams/ServerIT.java @@ -39,9 +39,7 @@ public class ServerIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/fan/config") .external("app0") .clean(); diff --git a/runtime/binding-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/filesystem/internal/stream/FileSystemServerIT.java b/runtime/binding-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/filesystem/internal/stream/FileSystemServerIT.java index de69ccadb8..f4e4ce46ff 100644 --- 
a/runtime/binding-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/filesystem/internal/stream/FileSystemServerIT.java +++ b/runtime/binding-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/filesystem/internal/stream/FileSystemServerIT.java @@ -44,9 +44,7 @@ public class FileSystemServerIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/filesystem/config") .external("app0") .clean(); diff --git a/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/stream/GrpcKafkaFetchProxyIT.java b/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/stream/GrpcKafkaFetchProxyIT.java index 02c0dc3702..d899699271 100644 --- a/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/stream/GrpcKafkaFetchProxyIT.java +++ b/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/stream/GrpcKafkaFetchProxyIT.java @@ -40,9 +40,7 @@ public class GrpcKafkaFetchProxyIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/grpc/kafka/config") diff --git a/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/stream/GrpcKafkaProduceProxyIT.java b/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/stream/GrpcKafkaProduceProxyIT.java index 8645086fcf..1470ed8a24 100644 --- 
a/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/stream/GrpcKafkaProduceProxyIT.java +++ b/runtime/binding-grpc-kafka/src/test/java/io/aklivity/zilla/runtime/blinding/grpc/kafka/internal/stream/GrpcKafkaProduceProxyIT.java @@ -39,9 +39,7 @@ public class GrpcKafkaProduceProxyIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configurationRoot("io/aklivity/zilla/specs/binding/grpc/kafka/config") .external("kafka0") diff --git a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/BidiStreamRpcIT.java b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/BidiStreamRpcIT.java index 74f70513f6..8e85862b20 100644 --- a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/BidiStreamRpcIT.java +++ b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/BidiStreamRpcIT.java @@ -40,9 +40,7 @@ public class BidiStreamRpcIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/grpc/config") .external("net0") .clean(); diff --git a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/ClientStreamRpcIT.java b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/ClientStreamRpcIT.java index 2930c2efe8..9af46146f3 100644 --- a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/ClientStreamRpcIT.java 
+++ b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/ClientStreamRpcIT.java @@ -38,9 +38,7 @@ public class ClientStreamRpcIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/grpc/config") .external("net0") .clean(); diff --git a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/ServerStreamRpcIT.java b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/ServerStreamRpcIT.java index 5aef2410fc..cffb69558f 100644 --- a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/ServerStreamRpcIT.java +++ b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/ServerStreamRpcIT.java @@ -38,9 +38,7 @@ public class ServerStreamRpcIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/grpc/config") .external("net0") .clean(); diff --git a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/UnaryRpcIT.java b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/UnaryRpcIT.java index 80ecb6867d..7855ea51e3 100644 --- a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/UnaryRpcIT.java +++ b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/client/UnaryRpcIT.java @@ -38,9 +38,7 @@ public class UnaryRpcIT private final EngineRule engine = new 
EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/grpc/config") .external("net0") .clean(); diff --git a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/BidiStreamRpcIT.java b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/BidiStreamRpcIT.java index 5959072c6d..69f274b748 100644 --- a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/BidiStreamRpcIT.java +++ b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/BidiStreamRpcIT.java @@ -40,9 +40,7 @@ public class BidiStreamRpcIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/grpc/config") .external("app0") .clean(); diff --git a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/ClientStreamRpcIT.java b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/ClientStreamRpcIT.java index 83d956cdbd..ee467eaed3 100644 --- a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/ClientStreamRpcIT.java +++ b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/ClientStreamRpcIT.java @@ -38,9 +38,7 @@ public class ClientStreamRpcIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) 
.configurationRoot("io/aklivity/zilla/specs/binding/grpc/config") .external("app0") .clean(); diff --git a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/RejectedRpcIT.java b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/RejectedRpcIT.java index 49c6e241c6..5162b7c553 100644 --- a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/RejectedRpcIT.java +++ b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/RejectedRpcIT.java @@ -37,9 +37,7 @@ public class RejectedRpcIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/grpc/config") .external("app0") .clean(); diff --git a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/ServerStreamRpcIT.java b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/ServerStreamRpcIT.java index 9b1a034a32..ba703f6c6c 100644 --- a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/ServerStreamRpcIT.java +++ b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/ServerStreamRpcIT.java @@ -38,9 +38,7 @@ public class ServerStreamRpcIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/grpc/config") .external("app0") .clean(); diff --git 
a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/UnaryRpcIT.java b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/UnaryRpcIT.java index 18833d58ed..4ea8a2315a 100644 --- a/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/UnaryRpcIT.java +++ b/runtime/binding-grpc/src/test/java/io/aklivity/zilla/runtime/binding/grpc/internal/streams/server/UnaryRpcIT.java @@ -38,9 +38,7 @@ public class UnaryRpcIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/grpc/config") .external("app0") .clean(); diff --git a/runtime/binding-http-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/stream/HttpFileSystemProxyIT.java b/runtime/binding-http-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/stream/HttpFileSystemProxyIT.java index b9786c4e3b..d4787a0e5c 100644 --- a/runtime/binding-http-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/stream/HttpFileSystemProxyIT.java +++ b/runtime/binding-http-filesystem/src/test/java/io/aklivity/zilla/runtime/binding/http/filesystem/internal/stream/HttpFileSystemProxyIT.java @@ -39,9 +39,7 @@ public class HttpFileSystemProxyIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/filesystem/config") .external("filesystem0") diff --git 
a/runtime/binding-http-kafka/src/test/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/stream/HttpKafkaProxyIT.java b/runtime/binding-http-kafka/src/test/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/stream/HttpKafkaProxyIT.java index ff95fd743d..888c4844a0 100644 --- a/runtime/binding-http-kafka/src/test/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/stream/HttpKafkaProxyIT.java +++ b/runtime/binding-http-kafka/src/test/java/io/aklivity/zilla/runtime/binding/http/kafka/internal/stream/HttpKafkaProxyIT.java @@ -39,9 +39,7 @@ public class HttpKafkaProxyIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/kafka/config") .external("kafka0") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/AdvisoryIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/AdvisoryIT.java index bb97ccc902..e5ccfac394 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/AdvisoryIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/AdvisoryIT.java @@ -39,9 +39,7 @@ public class AdvisoryIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("net0") .clean(); diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/ArchitectureIT.java 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/ArchitectureIT.java index a931329dd0..ea7f3326dc 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/ArchitectureIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/ArchitectureIT.java @@ -39,9 +39,7 @@ public class ArchitectureIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("net0") .clean(); diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/ConnectionManagementIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/ConnectionManagementIT.java index 7616897362..bac31467bb 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/ConnectionManagementIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/ConnectionManagementIT.java @@ -40,9 +40,7 @@ public class ConnectionManagementIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("net0") .clean(); diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/ConnectionManagementPoolSize1IT.java 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/ConnectionManagementPoolSize1IT.java index 376c3c4f2e..0bfb9fa8c5 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/ConnectionManagementPoolSize1IT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/ConnectionManagementPoolSize1IT.java @@ -42,9 +42,7 @@ public class ConnectionManagementPoolSize1IT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_MAXIMUM_CONNECTIONS, 1) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/FlowControlIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/FlowControlIT.java index b72089dba0..65f3fdb5be 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/FlowControlIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/FlowControlIT.java @@ -41,9 +41,7 @@ public class FlowControlIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("net0") diff --git 
a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/FlowControlLimitsIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/FlowControlLimitsIT.java index dc7ff8580d..0ccfea3b2c 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/FlowControlLimitsIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/FlowControlLimitsIT.java @@ -44,9 +44,7 @@ public class FlowControlLimitsIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 128) .configure(ENGINE_BUFFER_POOL_CAPACITY, 256) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/MessageFormatIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/MessageFormatIT.java index 7fda71e3f5..b577942d4b 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/MessageFormatIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/MessageFormatIT.java @@ -40,9 +40,7 @@ public class MessageFormatIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("net0") .clean(); diff --git 
a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/TransferCodingsIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/TransferCodingsIT.java index 134b0653e3..7cabe02091 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/TransferCodingsIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/client/TransferCodingsIT.java @@ -40,9 +40,7 @@ public class TransferCodingsIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("net0") .clean(); diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/AccessControlIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/AccessControlIT.java index 3b98c45f60..8fd94f97c2 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/AccessControlIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/AccessControlIT.java @@ -40,9 +40,7 @@ public class AccessControlIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .configure(HTTP_SERVER_HEADER, "Zilla") .external("app0") diff --git 
a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/AdvisoryIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/AdvisoryIT.java index 01615d1061..e85ab6db08 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/AdvisoryIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/AdvisoryIT.java @@ -39,9 +39,7 @@ public class AdvisoryIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("app0") .clean(); diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/ArchitectureIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/ArchitectureIT.java index 19ddfac250..363a854e9a 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/ArchitectureIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/ArchitectureIT.java @@ -39,9 +39,7 @@ public class ArchitectureIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("app0") .clean(); diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/AuthorizationIT.java 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/AuthorizationIT.java index 4732b44627..ff8b624f9a 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/AuthorizationIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/AuthorizationIT.java @@ -40,9 +40,7 @@ public class AuthorizationIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_SERVER_HEADER, "Zilla") .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("app0") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/ConnectionManagementIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/ConnectionManagementIT.java index 05390d81c5..6fc76b884c 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/ConnectionManagementIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/ConnectionManagementIT.java @@ -40,9 +40,7 @@ public class ConnectionManagementIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("app0") .clean(); diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/FlowControlIT.java 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/FlowControlIT.java index 780705a4d4..10825ce156 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/FlowControlIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/FlowControlIT.java @@ -41,9 +41,7 @@ public class FlowControlIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("app0") .clean(); diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/FlowControlLimitsIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/FlowControlLimitsIT.java index cccf33682b..65b32aa182 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/FlowControlLimitsIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/FlowControlLimitsIT.java @@ -41,9 +41,7 @@ public class FlowControlLimitsIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 64) .configure(ENGINE_BUFFER_POOL_CAPACITY, 64) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/MessageFormatIT.java 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/MessageFormatIT.java index 2e7c45e5a4..221369ca1e 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/MessageFormatIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/MessageFormatIT.java @@ -42,9 +42,7 @@ public class MessageFormatIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("app0") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/TransferCodingsIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/TransferCodingsIT.java index de4e5ea259..a8808b7794 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/TransferCodingsIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7230/server/TransferCodingsIT.java @@ -40,9 +40,7 @@ public class TransferCodingsIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v1.1") .external("app0") .clean(); diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/AbortIT.java 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/AbortIT.java index 51e4706ebe..4dbd2f566f 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/AbortIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/AbortIT.java @@ -42,9 +42,7 @@ public class AbortIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_CONCURRENT_STREAMS, 100) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") .external("net0") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/ConfigIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/ConfigIT.java index 2c93f31985..37eac77678 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/ConfigIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/ConfigIT.java @@ -45,9 +45,7 @@ public class ConfigIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_CONCURRENT_STREAMS, 100) .configure(EngineConfiguration.ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/ConnectionManagementIT.java 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/ConnectionManagementIT.java index 606d8e8592..47f2198b76 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/ConnectionManagementIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/ConnectionManagementIT.java @@ -44,9 +44,7 @@ public class ConnectionManagementIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_CONCURRENT_STREAMS, 100) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/FlowControlIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/FlowControlIT.java index ec6087d45f..213c9ce76d 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/FlowControlIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/FlowControlIT.java @@ -43,9 +43,7 @@ public class FlowControlIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_CONCURRENT_STREAMS, 100) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") .external("net0") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/MessageFormatIT.java 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/MessageFormatIT.java index bf07492bb4..aa747d0f1c 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/MessageFormatIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/MessageFormatIT.java @@ -43,9 +43,7 @@ public class MessageFormatIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_CONCURRENT_STREAMS, 100) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") .external("net0") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/StartingIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/StartingIT.java index c893a08e85..83411f0191 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/StartingIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/client/StartingIT.java @@ -43,9 +43,7 @@ public class StartingIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/upgrade") .external("net0") .configure(EngineConfiguration.ENGINE_DRAIN_ON_CLOSE, false) diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/AbortIT.java 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/AbortIT.java index b94ebf6b40..68cb56238e 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/AbortIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/AbortIT.java @@ -40,9 +40,7 @@ public class AbortIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_CONCURRENT_STREAMS, 100) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") .external("app0") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/AccessControlIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/AccessControlIT.java index bb91e2b9d8..ecddd20bbc 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/AccessControlIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/AccessControlIT.java @@ -41,9 +41,7 @@ public class AccessControlIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_CONCURRENT_STREAMS, 100) .configure(HTTP_SERVER_HEADER, "Zilla") .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/AuthorizationIT.java 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/AuthorizationIT.java index d7c4949856..1a9bfa6755 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/AuthorizationIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/AuthorizationIT.java @@ -41,9 +41,7 @@ public class AuthorizationIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_CONCURRENT_STREAMS, 100) .configure(HTTP_SERVER_HEADER, "Zilla") .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/ConfigIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/ConfigIT.java index 300ce1ab0f..082847bf70 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/ConfigIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/ConfigIT.java @@ -42,9 +42,7 @@ public class ConfigIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_CONCURRENT_STREAMS, 100) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") .external("app0") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/ConnectionManagementIT.java 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/ConnectionManagementIT.java index 2c47d46bba..531c9a13ff 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/ConnectionManagementIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/ConnectionManagementIT.java @@ -44,9 +44,7 @@ public class ConnectionManagementIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_CONCURRENT_STREAMS, 100) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") .external("app0") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/FlowControlIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/FlowControlIT.java index b713e2841f..b45464732e 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/FlowControlIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/FlowControlIT.java @@ -41,9 +41,7 @@ public class FlowControlIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_CONCURRENT_STREAMS, 100) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") .external("app0") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/MessageFormatIT.java 
b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/MessageFormatIT.java index 2b84809dc2..00340a25a9 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/MessageFormatIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/MessageFormatIT.java @@ -40,9 +40,7 @@ public class MessageFormatIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(HTTP_CONCURRENT_STREAMS, 100) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") .external("app0") diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/SettingsIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/SettingsIT.java index b8946d6977..da3941ca56 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/SettingsIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/SettingsIT.java @@ -41,9 +41,7 @@ public class SettingsIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/v2") .external("app0") .clean(); diff --git a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/StartingIT.java b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/StartingIT.java index 
890bebc4ab..4741673d7a 100644 --- a/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/StartingIT.java +++ b/runtime/binding-http/src/test/java/io/aklivity/zilla/runtime/binding/http/internal/streams/rfc7540/server/StartingIT.java @@ -41,9 +41,7 @@ public class StartingIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/http/config/upgrade") .configure(HTTP_CONCURRENT_STREAMS, 100) .external("app0") diff --git a/runtime/binding-kafka-grpc/src/test/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcRemoteServerIT.java b/runtime/binding-kafka-grpc/src/test/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcRemoteServerIT.java index e09442f13a..928283cce0 100644 --- a/runtime/binding-kafka-grpc/src/test/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcRemoteServerIT.java +++ b/runtime/binding-kafka-grpc/src/test/java/io/aklivity/zilla/runtime/binding/kafka/grpc/internal/stream/KafkaGrpcRemoteServerIT.java @@ -40,9 +40,7 @@ public class KafkaGrpcRemoteServerIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/grpc/config") diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheBootstrapIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheBootstrapIT.java index ac1eadab19..40752fe1a0 100644 --- 
a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheBootstrapIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheBootstrapIT.java @@ -42,9 +42,7 @@ public class CacheBootstrapIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(16384) + .countersBufferCapacity(16384) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(KAFKA_CACHE_SEGMENT_BYTES, 1 * 1024 * 1024) .configure(KAFKA_CACHE_SEGMENT_INDEX_BYTES, 256 * 1024) diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java index c282989190..b15c860a1c 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheConsumerIT.java @@ -40,9 +40,7 @@ public class CacheConsumerIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") .external("app1") .clean(); diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheDescribeIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheDescribeIT.java index 88f8b3be0f..de23eedc2e 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheDescribeIT.java +++ 
b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheDescribeIT.java @@ -43,9 +43,7 @@ public class CacheDescribeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(KAFKA_CACHE_SERVER_BOOTSTRAP, false) .configure(KAFKA_CACHE_SERVER_RECONNECT_DELAY, 0) .configure(KAFKA_CACHE_SEGMENT_BYTES, 1 * 1024 * 1024) diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheFetchIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheFetchIT.java index 65b164b076..b2f9e00260 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheFetchIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheFetchIT.java @@ -51,9 +51,7 @@ public class CacheFetchIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(KAFKA_CACHE_SERVER_BOOTSTRAP, false) .configure(KAFKA_CACHE_SERVER_RECONNECT_DELAY, 0) diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheGroupIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheGroupIT.java index e7c19b50aa..136c47c052 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheGroupIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheGroupIT.java @@ -42,9 +42,7 @@ public class CacheGroupIT private 
final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(KAFKA_CACHE_SERVER_BOOTSTRAP, false) .configure(KAFKA_CACHE_SERVER_RECONNECT_DELAY, 0) diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java index 0b6109112f..3da6fa27ea 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMergedIT.java @@ -46,9 +46,7 @@ public class CacheMergedIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(16384) + .countersBufferCapacity(16384) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(KAFKA_CACHE_SERVER_BOOTSTRAP, false) .configure(KAFKA_CACHE_SEGMENT_BYTES, 1 * 1024 * 1024) diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMetaIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMetaIT.java index cfdcf7f525..90325d19ab 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMetaIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheMetaIT.java @@ -43,9 +43,7 @@ public class CacheMetaIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) 
+ .countersBufferCapacity(8192) .configure(KAFKA_CACHE_SERVER_BOOTSTRAP, false) .configure(KAFKA_CACHE_SERVER_RECONNECT_DELAY, 0) .configure(KAFKA_CACHE_SEGMENT_BYTES, 1 * 1024 * 1024) diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetFetchIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetFetchIT.java index 12ebdf2fe7..8bbb8e0d6e 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetFetchIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheOffsetFetchIT.java @@ -42,9 +42,7 @@ public class CacheOffsetFetchIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(KAFKA_CACHE_SERVER_BOOTSTRAP, false) .configure(KAFKA_CACHE_SERVER_RECONNECT_DELAY, 0) diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheProduceIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheProduceIT.java index 8aa510437a..0f67e22554 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheProduceIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CacheProduceIT.java @@ -47,9 +47,7 @@ public class CacheProduceIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(KAFKA_CACHE_SERVER_BOOTSTRAP, false) 
.configure(KAFKA_CACHE_SERVER_RECONNECT_DELAY, 0) diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientDescribeIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientDescribeIT.java index 1dcae4d1b1..697dd4324c 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientDescribeIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientDescribeIT.java @@ -40,9 +40,7 @@ public class ClientDescribeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(KAFKA_CLIENT_DESCRIBE_MAX_AGE_MILLIS, 0) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") .external("net0") diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientDescribeSaslIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientDescribeSaslIT.java index 6b109acfec..feef221e5c 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientDescribeSaslIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientDescribeSaslIT.java @@ -42,9 +42,7 @@ public class ClientDescribeSaslIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(KAFKA_CLIENT_DESCRIBE_MAX_AGE_MILLIS, 0) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") .external("net0") diff --git 
a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientFetchIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientFetchIT.java index abaeb83485..32073041b4 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientFetchIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientFetchIT.java @@ -49,9 +49,7 @@ public class ClientFetchIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientFetchSaslIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientFetchSaslIT.java index c522a5e36f..d9bb13cad2 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientFetchSaslIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientFetchSaslIT.java @@ -42,9 +42,7 @@ public class ClientFetchSaslIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(KAFKA_CLIENT_META_MAX_AGE_MILLIS, 0) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") .external("net0") diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java 
b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java index f0a5b8e7ee..974eaf5419 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java @@ -39,9 +39,7 @@ public class ClientGroupIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") .external("net0") .clean(); diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupSaslIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupSaslIT.java index 789359fb95..0a7d42c17c 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupSaslIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupSaslIT.java @@ -40,9 +40,7 @@ public class ClientGroupSaslIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") .external("net0") .clean(); diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientMergedIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientMergedIT.java index 8844ff1b7f..8ae2d6fd51 100644 --- 
a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientMergedIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientMergedIT.java @@ -46,9 +46,7 @@ public class ClientMergedIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(KAFKA_CLIENT_META_MAX_AGE_MILLIS, 1000) .configure(KAFKA_CLIENT_PRODUCE_MAX_BYTES, 116) diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientMetaIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientMetaIT.java index 10a77e5b32..ddd5b9fe47 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientMetaIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientMetaIT.java @@ -40,9 +40,7 @@ public class ClientMetaIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(KAFKA_CLIENT_META_MAX_AGE_MILLIS, 0) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") .external("net0") diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientMetaSaslIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientMetaSaslIT.java index c3ff4d68d4..75501b13a4 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientMetaSaslIT.java +++ 
b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientMetaSaslIT.java @@ -42,9 +42,7 @@ public class ClientMetaSaslIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(KAFKA_CLIENT_META_MAX_AGE_MILLIS, 0) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") .external("net0") diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientOffsetFetchIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientOffsetFetchIT.java index 8e410691a9..fd2bafaf55 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientOffsetFetchIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientOffsetFetchIT.java @@ -39,9 +39,7 @@ public class ClientOffsetFetchIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") .external("net0") .clean(); diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientProduceIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientProduceIT.java index 45cb9481c6..5a22d21243 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientProduceIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientProduceIT.java @@ -44,9 +44,7 @@ public class ClientProduceIT private final EngineRule 
engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientProduceSaslIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientProduceSaslIT.java index 6a44330a41..818f5d7c47 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientProduceSaslIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientProduceSaslIT.java @@ -43,9 +43,7 @@ public class ClientProduceSaslIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config") diff --git a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java index 3405632c2a..8dd6772f7d 100644 --- a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java +++ b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishProxyIT.java @@ -41,9 +41,7 @@ public class MqttKafkaPublishProxyIT private final EngineRule engine = new EngineRule() 
.directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/kafka/config") .external("kafka0") diff --git a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java index 72ce0cd198..350186e2c4 100644 --- a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java +++ b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java @@ -48,9 +48,7 @@ public class MqttKafkaSessionProxyIT public final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configure(SESSION_ID_NAME, diff --git a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java index 93ad269c1e..7eecb79af7 100644 --- a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java +++ b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java @@ -42,9 +42,7 @@ public class MqttKafkaSubscribeProxyIT private final EngineRule engine = new EngineRule() 
.directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/kafka/config") diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java index 3ebb5051af..502ba2304a 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/ConnectionIT.java @@ -41,9 +41,7 @@ public class ConnectionIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(PUBLISH_TIMEOUT, 1L) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java index 32a0b402cf..cf35834032 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PingIT.java @@ -41,9 +41,7 @@ public class PingIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + 
.countersBufferCapacity(8192) .configure(PUBLISH_TIMEOUT, 1L) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java index 7521710e67..69b5195bfc 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/PublishIT.java @@ -41,9 +41,7 @@ public class PublishIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(PUBLISH_TIMEOUT, 1L) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java index 2941c395fb..5fedfc1ec4 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/SubscribeIT.java @@ -41,9 +41,7 @@ public class SubscribeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(PUBLISH_TIMEOUT, 1L) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") diff 
--git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java index 54ddbe6c80..355b6f10b1 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/client/UnsubscribeIT.java @@ -41,9 +41,7 @@ public class UnsubscribeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(PUBLISH_TIMEOUT, 1L) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/ConnectionIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/ConnectionIT.java index d0636e005c..61294723de 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/ConnectionIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/ConnectionIT.java @@ -47,9 +47,7 @@ public class ConnectionIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(PUBLISH_TIMEOUT, 1L) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PingIT.java 
b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PingIT.java index d2e3dc352d..87431e71c8 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PingIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PingIT.java @@ -43,9 +43,7 @@ public class PingIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(PUBLISH_TIMEOUT, 1L) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PublishIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PublishIT.java index c669347c6c..2f60250465 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PublishIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/PublishIT.java @@ -46,9 +46,7 @@ public class PublishIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(PUBLISH_TIMEOUT, 1L) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java index 6495045ddb..4b0c9ca57b 100644 --- 
a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SessionIT.java @@ -43,9 +43,7 @@ public class SessionIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(PUBLISH_TIMEOUT, 1L) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java index dd27bfb33e..7faaebd55c 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java +++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/SubscribeIT.java @@ -43,9 +43,7 @@ public class SubscribeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(PUBLISH_TIMEOUT, 1L) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configure(SUBSCRIPTION_ID_NAME, diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/UnsubscribeIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/UnsubscribeIT.java index 14752d0c83..f91ed01775 100644 --- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/UnsubscribeIT.java +++ 
b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/UnsubscribeIT.java @@ -41,9 +41,7 @@ public class UnsubscribeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(PUBLISH_TIMEOUT, 1L) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/mqtt/config") diff --git a/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/streams/ProxyClientIT.java b/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/streams/ProxyClientIT.java index 31cb12223b..2212900dea 100644 --- a/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/streams/ProxyClientIT.java +++ b/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/streams/ProxyClientIT.java @@ -45,9 +45,7 @@ public class ProxyClientIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/proxy/config") .external("net0") .clean(); diff --git a/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/streams/ProxyServerIT.java b/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/streams/ProxyServerIT.java index 97694924fa..117a75ef65 100644 --- a/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/streams/ProxyServerIT.java +++ b/runtime/binding-proxy/src/test/java/io/aklivity/zilla/runtime/binding/proxy/internal/streams/ProxyServerIT.java @@ -39,9 +39,7 @@ public class ProxyServerIT private final EngineRule engine = new EngineRule() 
.directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/proxy/config") .external("app0") .clean(); diff --git a/runtime/binding-sse-kafka/src/test/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/stream/SseKafkaProxyIT.java b/runtime/binding-sse-kafka/src/test/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/stream/SseKafkaProxyIT.java index 69b6b85b1b..6990dc85d2 100644 --- a/runtime/binding-sse-kafka/src/test/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/stream/SseKafkaProxyIT.java +++ b/runtime/binding-sse-kafka/src/test/java/io/aklivity/zilla/runtime/binding/sse/kafka/internal/stream/SseKafkaProxyIT.java @@ -39,9 +39,7 @@ public class SseKafkaProxyIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .configurationRoot("io/aklivity/zilla/specs/binding/sse/kafka/config") .external("kafka0") diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/AdvisoryIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/AdvisoryIT.java index da8286f10b..8f0231c9c0 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/AdvisoryIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/AdvisoryIT.java @@ -39,9 +39,7 @@ public class AdvisoryIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(2048) - .responseBufferCapacity(2048) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) 
.configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("net0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/ByteOrderMarkIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/ByteOrderMarkIT.java index bc7fb22d52..f3480a8c5e 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/ByteOrderMarkIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/ByteOrderMarkIT.java @@ -39,9 +39,7 @@ public class ByteOrderMarkIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("net0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/DataIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/DataIT.java index 509bb22539..c42a761436 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/DataIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/DataIT.java @@ -40,9 +40,7 @@ public class DataIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .configure(ENGINE_BUFFER_SLOT_CAPACITY, 8192) .external("net0") diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/EndOfLineIT.java 
b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/EndOfLineIT.java index 2ae9c51ab2..deed2563a5 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/EndOfLineIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/EndOfLineIT.java @@ -39,9 +39,7 @@ public class EndOfLineIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("net0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/ErrorIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/ErrorIT.java index 0d5dcd5220..a091196951 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/ErrorIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/ErrorIT.java @@ -39,9 +39,7 @@ public class ErrorIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(2048) - .responseBufferCapacity(2048) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("net0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/HandshakeIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/HandshakeIT.java index 88c71cf12f..6e33f4fdf9 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/HandshakeIT.java +++ 
b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/HandshakeIT.java @@ -39,9 +39,7 @@ public class HandshakeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("net0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/ReconnectIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/ReconnectIT.java index 2b5ce15a6d..377ba88b33 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/ReconnectIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/ReconnectIT.java @@ -39,9 +39,7 @@ public class ReconnectIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("net0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/TypeIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/TypeIT.java index b92285e405..f115e327fd 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/TypeIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/client/TypeIT.java @@ -41,9 +41,7 @@ public class TypeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - 
.responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("net0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/AdvisoryIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/AdvisoryIT.java index bc5a5113dd..19aa21e0af 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/AdvisoryIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/AdvisoryIT.java @@ -39,9 +39,7 @@ public class AdvisoryIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(2048) - .responseBufferCapacity(2048) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("app0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/ChallengeIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/ChallengeIT.java index 36684efa57..a5f7800331 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/ChallengeIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/ChallengeIT.java @@ -39,9 +39,7 @@ public class ChallengeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(2048) - .responseBufferCapacity(2048) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("app0") .clean(); diff --git 
a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/DataIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/DataIT.java index 1227e5a6f2..8033f9be7b 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/DataIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/DataIT.java @@ -40,9 +40,7 @@ public class DataIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("app0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/ErrorIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/ErrorIT.java index b745a1030a..d0ec0e9cf3 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/ErrorIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/ErrorIT.java @@ -39,9 +39,7 @@ public class ErrorIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(2048) - .responseBufferCapacity(2048) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("app0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/HandshakeIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/HandshakeIT.java index f59f21491b..e15d239c4b 100644 --- 
a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/HandshakeIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/HandshakeIT.java @@ -41,9 +41,7 @@ public class HandshakeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("app0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/IdIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/IdIT.java index 8c483c61f8..72111649d7 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/IdIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/IdIT.java @@ -38,10 +38,7 @@ public class IdIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/ReconnectIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/ReconnectIT.java index 11fe77db3a..11ca12284e 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/ReconnectIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/ReconnectIT.java @@ -39,9 +39,7 @@ public class ReconnectIT 
private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("app0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/TimestampIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/TimestampIT.java index 205e6dd0a4..a77dd58a57 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/TimestampIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/TimestampIT.java @@ -40,9 +40,7 @@ public class TimestampIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") .external("app0") .clean(); diff --git a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/TypeIT.java b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/TypeIT.java index 70088fe4d4..6742fb2209 100644 --- a/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/TypeIT.java +++ b/runtime/binding-sse/src/test/java/io/aklivity/zilla/runtime/binding/sse/internal/streams/server/TypeIT.java @@ -41,9 +41,7 @@ public class TypeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/sse/config") 
.external("app0") .clean(); diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientIOExceptionFromReadIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientIOExceptionFromReadIT.java index bccb9ddb36..59073e4b8c 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientIOExceptionFromReadIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientIOExceptionFromReadIT.java @@ -44,9 +44,7 @@ public class ClientIOExceptionFromReadIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .clean(); diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientIOExceptionFromWriteIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientIOExceptionFromWriteIT.java index 762fd7ad18..6d7e53a22b 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientIOExceptionFromWriteIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientIOExceptionFromWriteIT.java @@ -50,9 +50,7 @@ public class ClientIOExceptionFromWriteIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .clean(); diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientIT.java 
b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientIT.java index 5a3b4ff4a2..cd69d87413 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientIT.java @@ -48,9 +48,7 @@ public class ClientIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .clean(); diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientLimitsIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientLimitsIT.java index a9063eabde..da7a350a5b 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientLimitsIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientLimitsIT.java @@ -46,9 +46,7 @@ public class ClientLimitsIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 16) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .clean(); diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientPartialWriteIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientPartialWriteIT.java index b21ae130a5..7ca8935eb4 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientPartialWriteIT.java +++ 
b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientPartialWriteIT.java @@ -64,9 +64,7 @@ public class ClientPartialWriteIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .external("app0") .clean(); diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientPartialWriteLimitsIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientPartialWriteLimitsIT.java index 935ed825c4..021a27a73a 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientPartialWriteLimitsIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientPartialWriteLimitsIT.java @@ -64,9 +64,7 @@ public class ClientPartialWriteLimitsIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 16) .configure(ENGINE_BUFFER_POOL_CAPACITY, 16) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientResetAndAbortIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientResetAndAbortIT.java index 9a7cefd5be..7a971cd62c 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientResetAndAbortIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientResetAndAbortIT.java @@ -51,9 +51,7 @@ 
public class ClientResetAndAbortIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .clean(); diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientRoutingIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientRoutingIT.java index e379328b4a..a2aad7fa70 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientRoutingIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ClientRoutingIT.java @@ -40,9 +40,7 @@ public class ClientRoutingIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .clean(); diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerIOExceptionFromReadIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerIOExceptionFromReadIT.java index c38776f7ba..2906d2505e 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerIOExceptionFromReadIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerIOExceptionFromReadIT.java @@ -43,9 +43,7 @@ public class ServerIOExceptionFromReadIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) 
.configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .external("app0") diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerIOExceptionFromWriteIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerIOExceptionFromWriteIT.java index 1c7cd1e2f9..87e9c706d9 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerIOExceptionFromWriteIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerIOExceptionFromWriteIT.java @@ -48,9 +48,7 @@ public class ServerIOExceptionFromWriteIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .external("app0") .clean(); diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerIT.java index 3bef4e02ae..e28612865c 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerIT.java @@ -52,9 +52,7 @@ public class ServerIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(TCP_MAX_CONNECTIONS, 3) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") diff --git 
a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerLimitsIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerLimitsIT.java index fd08831697..3a9c40cc29 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerLimitsIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerLimitsIT.java @@ -45,10 +45,7 @@ public class ServerLimitsIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) - // Initial window size for output to network: + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 16) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .external("app0") diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerPartialWriteIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerPartialWriteIT.java index ef972e2fe6..f7979d5fdf 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerPartialWriteIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerPartialWriteIT.java @@ -62,9 +62,7 @@ public class ServerPartialWriteIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .external("app0") .clean(); diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerPartialWriteLimitsIT.java 
b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerPartialWriteLimitsIT.java index 70e58b4ccb..f001f4c2d3 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerPartialWriteLimitsIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerPartialWriteLimitsIT.java @@ -62,12 +62,8 @@ public class ServerPartialWriteLimitsIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) - // Initial window size for output to network: + .countersBufferCapacity(8192) .configure(ENGINE_BUFFER_SLOT_CAPACITY, 16) - // Overall buffer pool size same as slot size so maximum concurrent streams with partial writes = 1 .configure(ENGINE_BUFFER_POOL_CAPACITY, 16) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .external("app0") diff --git a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerResetAndAbortIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerResetAndAbortIT.java index 73faa7f2e6..be07c9ef63 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerResetAndAbortIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerResetAndAbortIT.java @@ -50,9 +50,7 @@ public class ServerResetAndAbortIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") .external("app0") .clean(); diff --git 
a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerRoutingIT.java b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerRoutingIT.java index 7819bd8210..27b80370fc 100644 --- a/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerRoutingIT.java +++ b/runtime/binding-tcp/src/test/java/io/aklivity/zilla/runtime/binding/tcp/internal/streams/ServerRoutingIT.java @@ -41,9 +41,7 @@ public class ServerRoutingIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(TCP_MAX_CONNECTIONS, 3) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/specs/binding/tcp/config") diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ClientFragmentedIT.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ClientFragmentedIT.java index 0486228556..b0fee23e02 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ClientFragmentedIT.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ClientFragmentedIT.java @@ -42,9 +42,7 @@ public class ClientFragmentedIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/tls/config") .external("net0") .clean(); diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ClientIT.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ClientIT.java index 
8526d07531..bdbdd0d0de 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ClientIT.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ClientIT.java @@ -46,9 +46,7 @@ public class ClientIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/tls/config") .external("net0") .configure(ENGINE_DRAIN_ON_CLOSE, false) diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ProxyIT.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ProxyIT.java index b8e06602b8..27ff2ac5dd 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ProxyIT.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ProxyIT.java @@ -39,9 +39,7 @@ public class ProxyIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/tls/config") .external("net1") .configure(ENGINE_DRAIN_ON_CLOSE, false) diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ServerFragmentedIT.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ServerFragmentedIT.java index d6a09c8cf2..0dfc80fdb1 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ServerFragmentedIT.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ServerFragmentedIT.java @@ 
-42,9 +42,7 @@ public class ServerFragmentedIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/tls/config") .external("app0") .clean(); diff --git a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ServerIT.java b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ServerIT.java index f2e7e80527..27ece98ddb 100644 --- a/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ServerIT.java +++ b/runtime/binding-tls/src/test/java/io/aklivity/zilla/runtime/binding/tls/internal/streams/ServerIT.java @@ -44,9 +44,7 @@ public class ServerIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/binding/tls/config") .external("app0") .configure(ENGINE_DRAIN_ON_CLOSE, false) diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/AdvisoryIT.java b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/AdvisoryIT.java index 61f41b39a1..f7d22301f7 100644 --- a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/AdvisoryIT.java +++ b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/AdvisoryIT.java @@ -42,9 +42,7 @@ public class AdvisoryIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) 
.configurationRoot("io/aklivity/zilla/specs/binding/ws/config") .external("net0") .clean(); diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/BaseFramingIT.java b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/BaseFramingIT.java index e4e18ec9a2..ec8640c9b1 100644 --- a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/BaseFramingIT.java +++ b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/BaseFramingIT.java @@ -43,9 +43,7 @@ public class BaseFramingIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/ws/config") .external("net0") .clean(); diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/FlowControlIT.java b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/FlowControlIT.java index 8a19a9eaed..7436b35fba 100644 --- a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/FlowControlIT.java +++ b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/FlowControlIT.java @@ -42,9 +42,7 @@ public class FlowControlIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/ws/config") .external("net0") .clean(); diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/OpeningHandshakeIT.java 
b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/OpeningHandshakeIT.java index e0983e7cc9..5ede78693e 100644 --- a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/OpeningHandshakeIT.java +++ b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/client/OpeningHandshakeIT.java @@ -43,9 +43,7 @@ public class OpeningHandshakeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/ws/config") .external("net0") .clean(); diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/AdvisoryIT.java b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/AdvisoryIT.java index 8af1531700..11d89fc600 100644 --- a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/AdvisoryIT.java +++ b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/AdvisoryIT.java @@ -43,9 +43,7 @@ public class AdvisoryIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/ws/config") .external("app0") .configure(ENGINE_DRAIN_ON_CLOSE, false) diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/BaseFramingIT.java b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/BaseFramingIT.java index 00d61bb949..9f45297f91 100644 --- 
a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/BaseFramingIT.java +++ b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/BaseFramingIT.java @@ -43,9 +43,7 @@ public class BaseFramingIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/ws/config") .external("app0") .clean(); diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/ClosingHandshakeIT.java b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/ClosingHandshakeIT.java index feb11c905f..9b9a8db51e 100644 --- a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/ClosingHandshakeIT.java +++ b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/ClosingHandshakeIT.java @@ -42,9 +42,7 @@ public class ClosingHandshakeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/ws/config") .external("app0") .clean(); diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/ControlIT.java b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/ControlIT.java index deacfca40c..b56df19049 100644 --- a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/ControlIT.java +++ b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/ControlIT.java @@ -40,9 +40,7 @@ 
public class ControlIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/ws/config") .external("app0") .clean(); diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/FlowControlIT.java b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/FlowControlIT.java index 70ed155911..4c3d957554 100644 --- a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/FlowControlIT.java +++ b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/FlowControlIT.java @@ -43,9 +43,7 @@ public class FlowControlIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/ws/config") .external("app0") .configure(ENGINE_DRAIN_ON_CLOSE, false) diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/FragmentationIT.java b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/FragmentationIT.java index fb021c3d29..5c588bdd56 100644 --- a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/FragmentationIT.java +++ b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/FragmentationIT.java @@ -42,9 +42,7 @@ public class FragmentationIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + 
.countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/ws/config") .external("app0") .clean(); diff --git a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/OpeningHandshakeIT.java b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/OpeningHandshakeIT.java index 3616f47656..e6e1a55938 100644 --- a/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/OpeningHandshakeIT.java +++ b/runtime/binding-ws/src/test/java/io/aklivity/zilla/runtime/binding/ws/internal/streams/server/OpeningHandshakeIT.java @@ -43,9 +43,7 @@ public class OpeningHandshakeIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/binding/ws/config") .external("app0") .clean(); diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java index 6bdc7840ec..b004e48d90 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java @@ -44,15 +44,11 @@ public class EngineConfiguration extends Configuration public static final PropertyDef ENGINE_DIRECTORY; public static final PropertyDef ENGINE_CACHE_DIRECTORY; public static final PropertyDef ENGINE_HOST_RESOLVER; - public static final IntPropertyDef ENGINE_BUDGETS_BUFFER_CAPACITY; - public static final IntPropertyDef ENGINE_LOAD_BUFFER_CAPACITY; - public static final IntPropertyDef ENGINE_STREAMS_BUFFER_CAPACITY; - public static final IntPropertyDef ENGINE_COMMAND_BUFFER_CAPACITY; - public static final IntPropertyDef 
ENGINE_RESPONSE_BUFFER_CAPACITY; - public static final IntPropertyDef ENGINE_COUNTERS_BUFFER_CAPACITY; public static final IntPropertyDef ENGINE_BUFFER_POOL_CAPACITY; public static final IntPropertyDef ENGINE_BUFFER_SLOT_CAPACITY; - public static final IntPropertyDef ENGINE_ROUTES_BUFFER_CAPACITY; + public static final IntPropertyDef ENGINE_STREAMS_BUFFER_CAPACITY; + public static final IntPropertyDef ENGINE_COUNTERS_BUFFER_CAPACITY; + public static final IntPropertyDef ENGINE_BUDGETS_BUFFER_CAPACITY; public static final BooleanPropertyDef ENGINE_TIMESTAMPS; public static final IntPropertyDef ENGINE_MAXIMUM_MESSAGES_PER_READ; public static final IntPropertyDef ENGINE_MAXIMUM_EXPIRATIONS_PER_POLL; @@ -83,15 +79,11 @@ public class EngineConfiguration extends Configuration ENGINE_CACHE_DIRECTORY = config.property(Path.class, "cache.directory", EngineConfiguration::cacheDirectory, "cache"); ENGINE_HOST_RESOLVER = config.property(HostResolver.class, "host.resolver", EngineConfiguration::decodeHostResolver, EngineConfiguration::defaultHostResolver); - ENGINE_BUDGETS_BUFFER_CAPACITY = config.property("budgets.buffer.capacity", 1024 * 1024); - ENGINE_LOAD_BUFFER_CAPACITY = config.property("load.buffer.capacity", 1024 * 8); - ENGINE_STREAMS_BUFFER_CAPACITY = config.property("streams.buffer.capacity", 1024 * 1024); - ENGINE_COMMAND_BUFFER_CAPACITY = config.property("command.buffer.capacity", 1024 * 1024); - ENGINE_RESPONSE_BUFFER_CAPACITY = config.property("response.buffer.capacity", 1024 * 1024); - ENGINE_COUNTERS_BUFFER_CAPACITY = config.property("counters.buffer.capacity", 1024 * 1024); ENGINE_BUFFER_POOL_CAPACITY = config.property("buffer.pool.capacity", EngineConfiguration::defaultBufferPoolCapacity); ENGINE_BUFFER_SLOT_CAPACITY = config.property("buffer.slot.capacity", 64 * 1024); - ENGINE_ROUTES_BUFFER_CAPACITY = config.property("routes.buffer.capacity", 1024 * 1024); + ENGINE_STREAMS_BUFFER_CAPACITY = config.property("streams.buffer.capacity", 1024 * 1024); + 
ENGINE_BUDGETS_BUFFER_CAPACITY = config.property("budgets.buffer.capacity", 1024 * 1024); + ENGINE_COUNTERS_BUFFER_CAPACITY = config.property("counters.buffer.capacity", 1024 * 1024); ENGINE_TIMESTAMPS = config.property("timestamps", true); ENGINE_MAXIMUM_MESSAGES_PER_READ = config.property("maximum.messages.per.read", Integer.MAX_VALUE); ENGINE_MAXIMUM_EXPIRATIONS_PER_POLL = config.property("maximum.expirations.per.poll", Integer.MAX_VALUE); @@ -172,26 +164,6 @@ public int bufferSlotCapacity() return ENGINE_BUFFER_SLOT_CAPACITY.getAsInt(this); } - public int maximumStreamsCount() - { - return bufferPoolCapacity() / bufferSlotCapacity(); - } - - public int maximumMessagesPerRead() - { - return ENGINE_MAXIMUM_MESSAGES_PER_READ.getAsInt(this); - } - - public int maximumExpirationsPerPoll() - { - return ENGINE_MAXIMUM_EXPIRATIONS_PER_POLL.getAsInt(this); - } - - public int taskParallelism() - { - return ENGINE_TASK_PARALLELISM.getAsInt(this); - } - public int budgetsBufferCapacity() { return ENGINE_BUDGETS_BUFFER_CAPACITY.getAsInt(this); @@ -202,39 +174,24 @@ public int streamsBufferCapacity() return ENGINE_STREAMS_BUFFER_CAPACITY.getAsInt(this); } - public int commandBufferCapacity() - { - return ENGINE_COMMAND_BUFFER_CAPACITY.get(this); - } - - public int responseBufferCapacity() - { - return ENGINE_RESPONSE_BUFFER_CAPACITY.getAsInt(this); - } - - public int loadBufferCapacity() + public int countersBufferCapacity() { - return ENGINE_LOAD_BUFFER_CAPACITY.getAsInt(this); - } - - public int routesBufferCapacity() - { - return ENGINE_ROUTES_BUFFER_CAPACITY.get(this); + return ENGINE_COUNTERS_BUFFER_CAPACITY.getAsInt(this); } - public int counterBufferCapacity() + public int maximumMessagesPerRead() { - return ENGINE_COUNTERS_BUFFER_CAPACITY.getAsInt(this); + return ENGINE_MAXIMUM_MESSAGES_PER_READ.getAsInt(this); } - public int counterValuesBufferCapacity() + public int maximumExpirationsPerPoll() { - return ENGINE_COUNTERS_BUFFER_CAPACITY.getAsInt(this); + return 
ENGINE_MAXIMUM_EXPIRATIONS_PER_POLL.getAsInt(this); } - public int counterLabelsBufferCapacity() + public int taskParallelism() { - return ENGINE_COUNTERS_BUFFER_CAPACITY.getAsInt(this) * 2; + return ENGINE_TASK_PARALLELISM.getAsInt(this); } public boolean timestamps() diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/DispatchAgent.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/DispatchAgent.java index 3dd2a7a62a..5c2fa9357e 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/DispatchAgent.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/internal/registry/DispatchAgent.java @@ -238,21 +238,21 @@ public DispatchAgent( this.countersLayout = new ScalarsLayout.Builder() .path(config.directory().resolve(String.format("metrics/counters%d", index))) - .capacity(config.counterBufferCapacity()) + .capacity(config.countersBufferCapacity()) .readonly(readonly) .label("counters") .build(); this.gaugesLayout = new ScalarsLayout.Builder() .path(config.directory().resolve(String.format("metrics/gauges%d", index))) - .capacity(config.counterBufferCapacity()) + .capacity(config.countersBufferCapacity()) .readonly(readonly) .label("gauges") .build(); this.histogramsLayout = new HistogramsLayout.Builder() .path(config.directory().resolve(String.format("metrics/histograms%d", index))) - .capacity(config.counterBufferCapacity()) + .capacity(config.countersBufferCapacity()) .readonly(readonly) .build(); diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/EngineIT.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/EngineIT.java index 6f8b6d34d0..a0713f833d 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/EngineIT.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/EngineIT.java @@ -39,9 +39,7 @@ public class EngineIT 
private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(4096) + .countersBufferCapacity(4096) .configurationRoot("io/aklivity/zilla/specs/engine/config") .external("app0") .clean(); diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/ReconfigureFileIT.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/ReconfigureFileIT.java index 3aaa3ec44b..e57e33072d 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/ReconfigureFileIT.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/ReconfigureFileIT.java @@ -54,9 +54,7 @@ public class ReconfigureFileIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/runtime/engine/internal") .external("app0") diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/ReconfigureHttpIT.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/ReconfigureHttpIT.java index 16cd085090..936c5bb85c 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/ReconfigureHttpIT.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/ReconfigureHttpIT.java @@ -48,9 +48,7 @@ public class ReconfigureHttpIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configure(ENGINE_DRAIN_ON_CLOSE, false) .configurationRoot("io/aklivity/zilla/runtime/engine/internal") .external("app0") diff --git 
a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/EngineRule.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/EngineRule.java index 7c2ee1c211..0adfc1f262 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/EngineRule.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/EngineRule.java @@ -15,14 +15,11 @@ */ package io.aklivity.zilla.runtime.engine.test; -import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_COMMAND_BUFFER_CAPACITY; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_CONFIG_URL; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_COUNTERS_BUFFER_CAPACITY; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DIRECTORY; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_DRAIN_ON_CLOSE; -import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_RESPONSE_BUFFER_CAPACITY; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_ROUTED_DELAY_MILLIS; -import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_STREAMS_BUFFER_CAPACITY; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_SYNTHETIC_ABORT; import static io.aklivity.zilla.runtime.engine.EngineConfiguration.ENGINE_WORKERS; import static java.nio.file.FileVisitOption.FOLLOW_LINKS; @@ -89,29 +86,16 @@ public EngineRule() configure(ENGINE_WORKERS, 1); } - public EngineRule directory(String directory) + public EngineRule directory( + String directory) { return configure(ENGINE_DIRECTORY, directory); } - public EngineRule commandBufferCapacity(int commandBufferCapacity) + public EngineRule countersBufferCapacity( + int countersBufferCapacity) { - return configure(ENGINE_COMMAND_BUFFER_CAPACITY, commandBufferCapacity); - } - - public EngineRule responseBufferCapacity(int responseBufferCapacity) - { - return 
configure(ENGINE_RESPONSE_BUFFER_CAPACITY, responseBufferCapacity); - } - - public EngineRule counterValuesBufferCapacity(int counterValuesBufferCapacity) - { - return configure(ENGINE_COUNTERS_BUFFER_CAPACITY, counterValuesBufferCapacity); - } - - public EngineRule streamsBufferCapacity(int streamsBufferCapacity) - { - return configure(ENGINE_STREAMS_BUFFER_CAPACITY, streamsBufferCapacity); + return configure(ENGINE_COUNTERS_BUFFER_CAPACITY, countersBufferCapacity); } public EngineRule configure( diff --git a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardIT.java b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardIT.java index a90c7ffee8..c89f04281c 100644 --- a/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardIT.java +++ b/runtime/guard-jwt/src/test/java/io/aklivity/zilla/runtime/guard/jwt/internal/JwtGuardIT.java @@ -32,9 +32,7 @@ public class JwtGuardIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - .counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/guard/jwt/config") .clean(); diff --git a/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemVaultIT.java b/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemVaultIT.java index 2987b14a29..55ebfa788f 100644 --- a/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemVaultIT.java +++ b/runtime/vault-filesystem/src/test/java/io/aklivity/zilla/runtime/vault/filesystem/internal/FileSystemVaultIT.java @@ -34,9 +34,7 @@ public class FileSystemVaultIT private final EngineRule engine = new EngineRule() .directory("target/zilla-itests") - .commandBufferCapacity(1024) - .responseBufferCapacity(1024) - 
.counterValuesBufferCapacity(8192) + .countersBufferCapacity(8192) .configurationRoot("io/aklivity/zilla/specs/vault/filesystem/config") .configure(ENGINE_DRAIN_ON_CLOSE, false) .clean(); From 1dc149d43a99d53c4a0c0beb8c1eebe1096c1275 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 21 Sep 2023 19:42:03 -0700 Subject: [PATCH 102/115] Engine configuration worker capacity (#443) --- .../runtime/engine/EngineConfiguration.java | 24 ++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java index b004e48d90..3aae325f97 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java @@ -34,6 +34,8 @@ import org.agrona.LangUtil; +import io.aklivity.zilla.runtime.engine.internal.layouts.BudgetsLayout; + public class EngineConfiguration extends Configuration { public static final boolean DEBUG_BUDGETS = Boolean.getBoolean("zilla.engine.debug.budgets"); @@ -44,6 +46,7 @@ public class EngineConfiguration extends Configuration public static final PropertyDef ENGINE_DIRECTORY; public static final PropertyDef ENGINE_CACHE_DIRECTORY; public static final PropertyDef ENGINE_HOST_RESOLVER; + public static final IntPropertyDef ENGINE_WORKER_CAPACITY; public static final IntPropertyDef ENGINE_BUFFER_POOL_CAPACITY; public static final IntPropertyDef ENGINE_BUFFER_SLOT_CAPACITY; public static final IntPropertyDef ENGINE_STREAMS_BUFFER_CAPACITY; @@ -79,10 +82,13 @@ public class EngineConfiguration extends Configuration ENGINE_CACHE_DIRECTORY = config.property(Path.class, "cache.directory", EngineConfiguration::cacheDirectory, "cache"); ENGINE_HOST_RESOLVER = config.property(HostResolver.class, "host.resolver", EngineConfiguration::decodeHostResolver, 
EngineConfiguration::defaultHostResolver); + ENGINE_WORKER_CAPACITY = config.property("worker.capacity", 64); ENGINE_BUFFER_POOL_CAPACITY = config.property("buffer.pool.capacity", EngineConfiguration::defaultBufferPoolCapacity); ENGINE_BUFFER_SLOT_CAPACITY = config.property("buffer.slot.capacity", 64 * 1024); - ENGINE_STREAMS_BUFFER_CAPACITY = config.property("streams.buffer.capacity", 1024 * 1024); - ENGINE_BUDGETS_BUFFER_CAPACITY = config.property("budgets.buffer.capacity", 1024 * 1024); + ENGINE_STREAMS_BUFFER_CAPACITY = config.property("streams.buffer.capacity", + EngineConfiguration::defaultStreamsBufferCapacity); + ENGINE_BUDGETS_BUFFER_CAPACITY = config.property("budgets.buffer.capacity", + EngineConfiguration::defaultBudgetsBufferCapacity); ENGINE_COUNTERS_BUFFER_CAPACITY = config.property("counters.buffer.capacity", 1024 * 1024); ENGINE_TIMESTAMPS = config.property("timestamps", true); ENGINE_MAXIMUM_MESSAGES_PER_READ = config.property("maximum.messages.per.read", Integer.MAX_VALUE); @@ -267,7 +273,19 @@ public Function hostResolver() private static int defaultBufferPoolCapacity( Configuration config) { - return ENGINE_BUFFER_SLOT_CAPACITY.get(config) * 64; + return ENGINE_BUFFER_SLOT_CAPACITY.get(config) * ENGINE_WORKER_CAPACITY.getAsInt(config); + } + + private static int defaultStreamsBufferCapacity( + Configuration config) + { + return ENGINE_BUFFER_SLOT_CAPACITY.get(config) * ENGINE_WORKER_CAPACITY.getAsInt(config); + } + + private static int defaultBudgetsBufferCapacity( + Configuration config) + { + return BudgetsLayout.SIZEOF_BUDGET_ENTRY * ENGINE_WORKER_CAPACITY.getAsInt(config); } private static URL configURL( From 8287e8715d27a5a14e6cfa7a4dc7b49664feb919 Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Thu, 21 Sep 2023 20:43:13 -0700 Subject: [PATCH 103/115] Don't close group stream on cluster and describe streams closer (#444) --- .../stream/KafkaClientGroupFactory.java | 51 ------------------- .../binding-kafka/src/main/zilla/protocol.idl | 
1 + .../kafka/internal/stream/ClientGroupIT.java | 2 +- .../topic.offset.info/client.rpt | 3 +- .../topic.offset.info/server.rpt | 3 +- 5 files changed, 6 insertions(+), 54 deletions(-) diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index eed082c50d..34c1aacdd8 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -414,35 +414,6 @@ private MessageConsumer newStream( return receiver; } - private void doBegin( - MessageConsumer receiver, - long originId, - long routedId, - long streamId, - long sequence, - long acknowledge, - int maximum, - long traceId, - long authorization, - long affinity, - Consumer extension) - { - final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) - .originId(originId) - .routedId(routedId) - .streamId(streamId) - .sequence(sequence) - .acknowledge(acknowledge) - .maximum(maximum) - .traceId(traceId) - .authorization(authorization) - .affinity(affinity) - .extension(extension) - .build(); - - receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); - } - private void doBegin( MessageConsumer receiver, long originId, @@ -1861,20 +1832,9 @@ private void onNetworkData( private void onNetworkEnd( EndFW end) { - final long traceId = end.traceId(); - state = KafkaState.closedReply(state); cleanupDecodeSlotIfNecessary(); - - if (!delegate.isApplicationReplyOpen()) - { - onError(traceId); - } - else if (decodeSlot == NO_SLOT) - { - delegate.doApplicationEnd(traceId); - } } private void onNetworkAbort( @@ -2560,20 +2520,9 @@ private void onNetworkData( private void onNetworkEnd( EndFW end) { - 
final long traceId = end.traceId(); - state = KafkaState.closedReply(state); cleanupDecodeSlotIfNecessary(); - - if (!KafkaState.replyOpened(delegate.state)) - { - cleanupNetwork(traceId); - } - else if (decodeSlot == NO_SLOT) - { - delegate.doApplicationEnd(traceId); - } } private void onNetworkAbort( diff --git a/runtime/binding-kafka/src/main/zilla/protocol.idl b/runtime/binding-kafka/src/main/zilla/protocol.idl index 66afcfdbf8..550d03d9c9 100644 --- a/runtime/binding-kafka/src/main/zilla/protocol.idl +++ b/runtime/binding-kafka/src/main/zilla/protocol.idl @@ -484,6 +484,7 @@ scope protocol struct OffsetFetchResponse { + int32 correlationId; int32 topicCount; } diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java index 974eaf5419..377abe001a 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java @@ -83,7 +83,7 @@ public void shouldHandleCoordinatorNotAvailableError() throws Exception @Specification({ "${app}/leader/client", "${net}/coordinator.reject.invalid.consumer/server"}) - public void shouldHRejectInvalidConsumer() throws Exception + public void shouldRejectInvalidConsumer() throws Exception { k3po.finish(); } diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/client.rpt index 11f8fe8d16..98de6cd612 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/client.rpt @@ -39,7 +39,8 @@ write 38 # size 1 # partitions 0 # partition -read 30 # size +read 34 # size + (int:newRequestId) 1 # topics 4s "test" # "test" topic 1 # partitions diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/server.rpt index fc324de30e..842c3ac62f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/offset.fetch.v0/topic.offset.info/server.rpt @@ -36,7 +36,8 @@ read 38 # size 1 # partitions 0 # partition -write 30 # size +write 34 # size + ${newRequestId} 1 # topics 4s "test" # "test" topic 1 # partitions From 1303bf062ec984bb48f0a752f390199e20ca5bbc Mon Sep 17 00:00:00 2001 From: John Fallows Date: Thu, 21 Sep 2023 22:19:14 -0700 Subject: [PATCH 104/115] Adjust engine backoff strategy configuration (#446) --- .../aklivity/zilla/runtime/engine/EngineConfiguration.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java index 3aae325f97..267493dff4 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java @@ -96,9 +96,8 @@ public class EngineConfiguration extends Configuration ENGINE_TASK_PARALLELISM = config.property("task.parallelism", 1); 
ENGINE_BACKOFF_MAX_SPINS = config.property("backoff.idle.strategy.max.spins", 64L); ENGINE_BACKOFF_MAX_YIELDS = config.property("backoff.idle.strategy.max.yields", 64L); - // TODO: shorten property name string values to match constant naming - ENGINE_BACKOFF_MIN_PARK_NANOS = config.property("backoff.idle.strategy.min.park.period", NANOSECONDS.toNanos(64L)); - ENGINE_BACKOFF_MAX_PARK_NANOS = config.property("backoff.idle.strategy.max.park.period", MILLISECONDS.toNanos(1L)); + ENGINE_BACKOFF_MIN_PARK_NANOS = config.property("backoff.min.park.nanos", NANOSECONDS.toNanos(64L)); + ENGINE_BACKOFF_MAX_PARK_NANOS = config.property("backoff.max.park.nanos", MILLISECONDS.toNanos(100L)); ENGINE_DRAIN_ON_CLOSE = config.property("drain.on.close", false); ENGINE_SYNTHETIC_ABORT = config.property("synthetic.abort", false); ENGINE_ROUTED_DELAY_MILLIS = config.property("routed.delay.millis", 0L); From 9b44d4d0369e35425f0a6e57b830efb1afc72b0c Mon Sep 17 00:00:00 2001 From: bmaidics Date: Fri, 22 Sep 2023 18:48:40 +0200 Subject: [PATCH 105/115] Do not include generated subcsriptionId (#448) --- .../generate/Varuint32FlyweightGenerator.java | 4 ++-- .../mqtt/internal/stream/MqttServerFactory.java | 16 ++++++++++++---- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/build/flyweight-maven-plugin/src/main/java/io/aklivity/zilla/build/maven/plugins/flyweight/internal/generate/Varuint32FlyweightGenerator.java b/build/flyweight-maven-plugin/src/main/java/io/aklivity/zilla/build/maven/plugins/flyweight/internal/generate/Varuint32FlyweightGenerator.java index 3552737de1..b5f7ea0166 100644 --- a/build/flyweight-maven-plugin/src/main/java/io/aklivity/zilla/build/maven/plugins/flyweight/internal/generate/Varuint32FlyweightGenerator.java +++ b/build/flyweight-maven-plugin/src/main/java/io/aklivity/zilla/build/maven/plugins/flyweight/internal/generate/Varuint32FlyweightGenerator.java @@ -233,9 +233,9 @@ private MethodSpec setMethod() .addModifiers(PUBLIC) 
.returns(flyweightType.nestedClass("Builder")) .addParameter(int.class, "value") - .beginControlFlow("if (value > 0x0FFFFFFF)") + .beginControlFlow("if (value < 0)") .addStatement("throw new $T(String.format($S, value))", IllegalArgumentException.class, - "Input value %d too long") + "Input value %d is negative") .endControlFlow() .addStatement("final MutableDirectBuffer buffer = buffer()") .addStatement("int progress = offset()") diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 79b84c4968..3366461bde 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -223,6 +223,7 @@ public final class MqttServerFactory implements MqttStreamFactory private static final String16FW NULL_STRING = new String16FW((String) null); public static final String SHARED_SUBSCRIPTION_LITERAL = "$share"; + public static final int GENERATED_SUBSCRIPTION_ID_MASK = 0x70; private final BeginFW beginRO = new BeginFW(); private final DataFW dataRO = new DataFW(); @@ -2517,16 +2518,17 @@ private void doEncodePublish( MutableBoolean retainAsPublished = new MutableBoolean(false); - subscriptionIds.forEach(subscriptionId -> + subscriptionIds.forEach(s -> { - if (subscriptionId.value() > 0) + final int subscriptionId = s.value(); + if (subscriptionId > 0 && !generatedSubscriptionId(subscriptionId)) { Optional result = subscriptions.stream() - .filter(subscription -> subscription.id == subscriptionId.value()) + .filter(subscription -> subscription.id == subscriptionId) .findFirst(); retainAsPublished.set(retainAsPublished.value | result.isPresent() && result.get().retainAsPublished()); 
mqttPropertyRW.wrap(propertyBuffer, propertiesSize.get(), propertyBuffer.capacity()) - .subscriptionId(v -> v.set(subscriptionId.value())); + .subscriptionId(v -> v.set(subscriptionId)); propertiesSize.set(mqttPropertyRW.limit()); } }); @@ -2605,6 +2607,12 @@ private void doEncodePublish( } } + private boolean generatedSubscriptionId( + int subscriptionId) + { + return (subscriptionId & GENERATED_SUBSCRIPTION_ID_MASK) == GENERATED_SUBSCRIPTION_ID_MASK; + } + private int calculatePublishNetworkFlags(int applicationTypeAndFlags, int qos) { int flags = 0; From 8975694a9e03b0f9429fd69dd93834c41422d71a Mon Sep 17 00:00:00 2001 From: John Fallows Date: Fri, 22 Sep 2023 09:49:16 -0700 Subject: [PATCH 106/115] Configure DRAIN_ON_CLOSE false during ITs to report exceptions more easily --- .../java/io/aklivity/zilla/runtime/engine/test/EngineRule.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/EngineRule.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/EngineRule.java index 0adfc1f262..4461e8d624 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/EngineRule.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/EngineRule.java @@ -80,7 +80,7 @@ public EngineRule() this.builder = Engine.builder(); this.properties = new Properties(); - configure(ENGINE_DRAIN_ON_CLOSE, true); + configure(ENGINE_DRAIN_ON_CLOSE, false); configure(ENGINE_SYNTHETIC_ABORT, true); configure(ENGINE_ROUTED_DELAY_MILLIS, 500L); configure(ENGINE_WORKERS, 1); From 7adfbfc0590764fdc53470e7635b2ee705ecabec Mon Sep 17 00:00:00 2001 From: Attila Kreiner Date: Fri, 22 Sep 2023 19:00:24 +0200 Subject: [PATCH 107/115] Rename config command to generate (#449) --- .../command/config/internal/airline/ZillaConfigCommand.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java index 32a6cd245a..838c8d5ad9 100644 --- a/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java +++ b/incubator/command-config/src/main/java/io/aklivity/zilla/runtime/command/config/internal/airline/ZillaConfigCommand.java @@ -34,7 +34,7 @@ import io.aklivity.zilla.runtime.command.config.internal.asyncapi.mqtt.proxy.AsyncApiMqttProxyConfigGenerator; import io.aklivity.zilla.runtime.command.config.internal.openapi.http.proxy.OpenApiHttpProxyConfigGenerator; -@Command(name = "config", description = "Generate configuration file") +@Command(name = "generate", description = "Generate configuration file") public final class ZillaConfigCommand extends ZillaCommand { private static final Map> GENERATORS = Map.of( From 48dfe97560b84271b988bf47e2e7c453a2d8fd29 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Fri, 22 Sep 2023 11:06:23 -0700 Subject: [PATCH 108/115] Increase default budgets buffer capacity --- .../io/aklivity/zilla/runtime/engine/EngineConfiguration.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java index 267493dff4..7c9bc53e24 100644 --- a/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java +++ b/runtime/engine/src/main/java/io/aklivity/zilla/runtime/engine/EngineConfiguration.java @@ -284,7 +284,8 @@ private static int defaultStreamsBufferCapacity( private static int defaultBudgetsBufferCapacity( Configuration config) { - return BudgetsLayout.SIZEOF_BUDGET_ENTRY * ENGINE_WORKER_CAPACITY.getAsInt(config); + // more 
consistent with original defaults + return BudgetsLayout.SIZEOF_BUDGET_ENTRY * 512 * ENGINE_WORKER_CAPACITY.getAsInt(config); } private static URL configURL( From 0091f43c5dcc31900425bcda098477527d0d158d Mon Sep 17 00:00:00 2001 From: bmaidics Date: Fri, 22 Sep 2023 20:42:43 +0200 Subject: [PATCH 109/115] Remove clientId from subcsribeKey (#450) --- .../internal/stream/MqttServerFactory.java | 26 +++++-------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 3366461bde..96f99ec481 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -470,13 +470,6 @@ private int topicKey( return System.identityHashCode(topic.intern()); } - private int subscribeKey( - String clientId, - long resolveId) - { - return System.identityHashCode((clientId + "_" + resolveId).intern()); - } - private MessageConsumer newStream( MessageConsumer sender, long originId, @@ -1226,7 +1219,7 @@ private final class MqttServer private final long encodeBudgetId; private final Int2ObjectHashMap publishStreams; - private final Int2ObjectHashMap subscribeStreams; + private final Long2ObjectHashMap subscribeStreams; private final Int2ObjectHashMap topicAliases; private final Int2IntHashMap subscribePacketIds; private final Object2IntHashMap unsubscribePacketIds; @@ -1309,7 +1302,7 @@ private MqttServer( this.encodeBudgetId = budgetId; this.decoder = decodeInitialType; this.publishStreams = new Int2ObjectHashMap<>(); - this.subscribeStreams = new Int2ObjectHashMap<>(); + this.subscribeStreams = new Long2ObjectHashMap<>(); this.topicAliases = new Int2ObjectHashMap<>(); 
this.subscribePacketIds = new Int2IntHashMap(-1); this.unsubscribePacketIds = new Object2IntHashMap<>(-1); @@ -2116,12 +2109,11 @@ private void openSubscribeStreams( subscriptionsByRouteId.forEach((key, value) -> { - int subscribeKey = subscribeKey(clientId.asString(), key); - MqttSubscribeStream stream = subscribeStreams.computeIfAbsent(subscribeKey, s -> + MqttSubscribeStream stream = subscribeStreams.computeIfAbsent(key, s -> new MqttSubscribeStream(routedId, key, implicitSubscribe)); stream.packetId = packetId; value.removeIf(s -> s.reasonCode > GRANTED_QOS_2); - stream.doSubscribeBeginOrFlush(traceId, affinity, subscribeKey, value); + stream.doSubscribeBeginOrFlush(traceId, affinity, value); }); } @@ -2243,8 +2235,7 @@ private void sendUnsuback( final MqttBindingConfig binding = bindings.get(routedId); final MqttRouteConfig resolved = binding != null ? binding.resolveSubscribe(sessionId, topicFilter) : null; - final int subscribeKey = subscribeKey(clientId.asString(), resolved.id); - final MqttSubscribeStream stream = subscribeStreams.get(subscribeKey); + final MqttSubscribeStream stream = subscribeStreams.get(resolved.id); Optional subscription = stream.getSubscriptionByFilter(topicFilter, newState); @@ -4022,7 +4013,6 @@ private class MqttSubscribeStream private int state; private final List subscriptions; private boolean acknowledged; - private int clientKey; private int packetId; private final boolean adminSubscribe; @@ -4052,11 +4042,9 @@ private Optional getSubscriptionByFilter( private void doSubscribeBeginOrFlush( long traceId, long affinity, - int clientKey, List subscriptions) { this.subscriptions.addAll(subscriptions); - this.clientKey = clientKey; if (!MqttState.initialOpening(state)) { @@ -4168,7 +4156,7 @@ private void setNetClosed() if (MqttState.closed(state)) { - subscribeStreams.remove(clientKey); + subscribeStreams.remove(routedId); } } @@ -4430,7 +4418,7 @@ private void setSubscribeAppClosed() { if (MqttState.closed(state)) { - 
subscribeStreams.remove(clientKey); + subscribeStreams.remove(routedId); } } From 88309e831461a20e372768a262553705f4dabc09 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Fri, 22 Sep 2023 14:08:21 -0700 Subject: [PATCH 110/115] Restore drain on close --- .../java/io/aklivity/zilla/runtime/engine/test/EngineRule.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/EngineRule.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/EngineRule.java index 4461e8d624..0adfc1f262 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/EngineRule.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/test/EngineRule.java @@ -80,7 +80,7 @@ public EngineRule() this.builder = Engine.builder(); this.properties = new Properties(); - configure(ENGINE_DRAIN_ON_CLOSE, false); + configure(ENGINE_DRAIN_ON_CLOSE, true); configure(ENGINE_SYNTHETIC_ABORT, true); configure(ENGINE_ROUTED_DELAY_MILLIS, 500L); configure(ENGINE_WORKERS, 1); From cbc370a0d859886336f13519bed862428c017943 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Sat, 23 Sep 2023 07:30:52 -0700 Subject: [PATCH 111/115] Ignore IT failing only on GitHub Actions --- .../io/aklivity/zilla/runtime/engine/internal/EngineIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/EngineIT.java b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/EngineIT.java index a0713f833d..158e3ec490 100644 --- a/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/EngineIT.java +++ b/runtime/engine/src/test/java/io/aklivity/zilla/runtime/engine/internal/EngineIT.java @@ -18,6 +18,7 @@ import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.rules.RuleChain.outerRule; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import 
org.junit.rules.DisableOnDebug; @@ -117,6 +118,7 @@ public void shouldReceiveClientSentWriteClose() throws Exception k3po.finish(); } + @Ignore("GitHub Actions") @Test @Configuration("server.yaml") @Specification({ From 342123985889e68f97e31e5a4799e9b098f1ab9e Mon Sep 17 00:00:00 2001 From: bmaidics Date: Sat, 23 Sep 2023 17:00:18 +0200 Subject: [PATCH 112/115] Fix implicit subscribe no packetId at startup (#451) --- .../binding/mqtt/internal/stream/MqttServerFactory.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java index 96f99ec481..af7670f022 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java @@ -3351,9 +3351,8 @@ private void onSessionData( subscription.flags = filter.flags(); subscriptions.add(subscription); }); - int packetId = subscribePacketIds.get(subscriptions.get(0).id); - subscriptions.forEach(sub -> subscribePacketIds.remove(sub.id)); - openSubscribeStreams(packetId, traceId, authorization, subscriptions, true); + + openSubscribeStreams(0, traceId, authorization, subscriptions, true); sessionPresent = true; } } From 4fb67586549c4f54a43895bec55680cee900d719 Mon Sep 17 00:00:00 2001 From: bmaidics Date: Sat, 23 Sep 2023 22:30:41 +0200 Subject: [PATCH 113/115] Mqtt client publish fix (#464) --- .../internal/stream/MqttClientFactory.java | 28 ++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java 
b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java index 518c04eccc..cdbeb1a378 100644 --- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java +++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttClientFactory.java @@ -80,6 +80,7 @@ import java.util.function.LongFunction; import java.util.function.LongSupplier; import java.util.function.LongUnaryOperator; +import java.util.regex.Pattern; import java.util.stream.Collectors; import org.agrona.DirectBuffer; @@ -947,7 +948,10 @@ private int decodePublish( final Varuint32FW firstSubscriptionId = subscriptionIdsRW.build().matchFirst(s -> true); final int subscriptionId = firstSubscriptionId != null ? firstSubscriptionId.value() : 0; - if (!client.existStreamForId(subscriptionId)) + boolean existSubscribeStream = subscriptionId != 0 ? client.existStreamForId(subscriptionId) + : client.existStreamForTopic(mqttPublishHeader.topic); + + if (!existSubscribeStream) { MqttSessionStateFW.Builder sessionStateBuilder = mqttSessionStateRW.wrap(sessionStateBuffer, 0, sessionStateBuffer.capacity()); @@ -2693,6 +2697,28 @@ private boolean existStreamForId( return sessionStream.subscriptions.stream().anyMatch(s -> s.id == subscriptionId); } + private boolean existStreamForTopic( + String topic) + { + boolean match = sessionStream.subscriptions.stream().anyMatch(s -> + { + String regex = s.filter.replace("#", ".*").replace("+", "[^/]+"); + return Pattern.matches(regex, topic); + }); + + if (!match) + { + match = sessionStream.unAckedSubscriptionsByPacketId.values().stream().anyMatch(ss -> + ss.stream().anyMatch(s -> + { + String regex = s.filter.replace("#", ".*").replace("+", "[^/]+"); + return Pattern.matches(regex, topic); + })); + } + + return match; + } + private int nextPacketId() { final int packetId = packetIdCounter.incrementAndGet(); From 
f86fb8318f5d0b3acc7cb953c263673452517dfe Mon Sep 17 00:00:00 2001 From: Akram Yakubov Date: Sat, 23 Sep 2023 16:16:51 -0700 Subject: [PATCH 114/115] Connection pool for kafka group client (#438) --- .../kafka/internal/KafkaConfiguration.java | 7 + .../stream/KafkaCacheGroupFactory.java | 32 - .../stream/KafkaClientConnectionPool.java | 1347 +++++++++++++++++ .../internal/stream/KafkaClientFactory.java | 12 +- .../stream/KafkaClientGroupFactory.java | 201 ++- .../kafka/internal/stream/ClientGroupIT.java | 1 - .../client.rpt | 1 + .../client.rpt | 3 - .../server.rpt | 3 - .../coordinator.not.available/client.rpt | 15 +- .../coordinator.not.available/server.rpt | 9 +- .../client.rpt | 106 +- .../server.rpt | 77 +- .../client.rpt | 19 +- .../server.rpt | 9 +- .../client.rpt | 16 +- .../server.rpt | 10 +- .../client.rpt | 17 +- .../server.rpt | 11 +- .../client.rpt | 15 +- .../server.rpt | 9 +- .../rebalance.protocol.highlander/client.rpt | 19 +- .../rebalance.protocol.highlander/server.rpt | 13 +- .../rebalance.protocol.unknown/client.rpt | 14 +- .../rebalance.protocol.unknown/server.rpt | 8 +- .../rebalance.sync.group/client.rpt | 17 +- .../rebalance.sync.group/server.rpt | 11 +- .../leader/client.rpt | 14 +- .../leader/server.rpt | 8 +- 29 files changed, 1611 insertions(+), 413 deletions(-) create mode 100644 runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConnectionPool.java diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java index 6ddfd3582d..f70dadadc7 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/KafkaConfiguration.java @@ -69,6 +69,7 @@ public class 
KafkaConfiguration extends Configuration public static final PropertyDef KAFKA_CLIENT_GROUP_REBALANCE_TIMEOUT; public static final PropertyDef KAFKA_CLIENT_ID; public static final PropertyDef KAFKA_CLIENT_INSTANCE_ID; + public static final BooleanPropertyDef KAFKA_CLIENT_CONNECTION_POOL; private static final ConfigurationDef KAFKA_CONFIG; @@ -111,6 +112,7 @@ public class KafkaConfiguration extends Configuration KAFKA_CACHE_SEGMENT_BYTES = config.property("cache.segment.bytes", 0x40000000); KAFKA_CACHE_SEGMENT_INDEX_BYTES = config.property("cache.segment.index.bytes", 0xA00000); KAFKA_CACHE_CLIENT_TRAILERS_SIZE_MAX = config.property("cache.client.trailers.size.max", 256); + KAFKA_CLIENT_CONNECTION_POOL = config.property("client.connection.pool", true); KAFKA_CONFIG = config; } @@ -240,6 +242,11 @@ public boolean cacheServerBootstrap() return KAFKA_CACHE_SERVER_BOOTSTRAP.getAsBoolean(this); } + public boolean clientConnectionPool() + { + return KAFKA_CLIENT_CONNECTION_POOL.getAsBoolean(this); + } + public int cacheClientReconnect() { return KAFKA_CACHE_CLIENT_RECONNECT_DELAY.getAsInt(this); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java index 63ba697551..43dbddc16a 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheGroupFactory.java @@ -269,38 +269,6 @@ private void doData( receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); } - - private void doDataNull( - MessageConsumer receiver, - long originId, - long routedId, - long streamId, - long sequence, - long acknowledge, - int maximum, - long traceId, - long authorization, - long budgetId, - int 
reserved, - Flyweight extension) - { - final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) - .originId(originId) - .routedId(routedId) - .streamId(streamId) - .sequence(sequence) - .acknowledge(acknowledge) - .maximum(maximum) - .traceId(traceId) - .authorization(authorization) - .budgetId(budgetId) - .reserved(reserved) - .extension(extension.buffer(), extension.offset(), extension.sizeof()) - .build(); - - receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); - } - private void doFlush( MessageConsumer receiver, long originId, diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConnectionPool.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConnectionPool.java new file mode 100644 index 0000000000..c1eb7f2c27 --- /dev/null +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConnectionPool.java @@ -0,0 +1,1347 @@ +/* + * Copyright 2021-2023 Aklivity Inc. + * + * Aklivity licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.aklivity.zilla.runtime.binding.kafka.internal.stream; + +import static io.aklivity.zilla.runtime.binding.kafka.internal.types.ProxyAddressProtocol.STREAM; +import static io.aklivity.zilla.runtime.engine.budget.BudgetCreditor.NO_BUDGET_ID; +import static io.aklivity.zilla.runtime.engine.budget.BudgetCreditor.NO_CREDITOR_INDEX; +import static io.aklivity.zilla.runtime.engine.concurrent.Signaler.NO_CANCEL_ID; +import static java.lang.System.currentTimeMillis; + +import java.util.function.Consumer; +import java.util.function.IntConsumer; +import java.util.function.LongSupplier; +import java.util.function.LongUnaryOperator; + +import org.agrona.DirectBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.collections.Long2LongHashMap; +import org.agrona.collections.Long2ObjectHashMap; +import org.agrona.collections.LongArrayQueue; +import org.agrona.collections.Object2ObjectHashMap; +import org.agrona.concurrent.UnsafeBuffer; + +import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration; +import io.aklivity.zilla.runtime.binding.kafka.internal.budget.MergedBudgetCreditor; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.ProxyAddressInetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.RequestHeaderFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.ResponseHeaderFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ProxyBeginExFW; +import 
io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.SignalFW; +import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW; +import io.aklivity.zilla.runtime.engine.EngineContext; +import io.aklivity.zilla.runtime.engine.binding.BindingHandler; +import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.concurrent.Signaler; + +public final class KafkaClientConnectionPool +{ + private static final long NO_DELTA = -1L; + private static final int KAFKA_FRAME_LENGTH_FIELD_OFFSET = 4; + private static final int FLAG_FIN = 0x01; + private static final int FLAG_INIT = 0x02; + private static final int FLAG_SKIP = 0x08; + private static final int FLAG_NONE = 0x00; + private static final Consumer EMPTY_EXTENSION = ex -> {}; + + private static final int SIGNAL_CONNECTION_CLEANUP = 0x80000001; + private static final int SIGNAL_STREAM_INITIAL_RESET = 0x80000001; + private static final int SIGNAL_STREAM_REPLY_BEGIN = 0x80000002; + private static final int SIGNAL_STREAM_REPLY_END = 0x80000003; + private static final int SIGNAL_STREAM_REPLY_ABORT = 0x80000004; + private static final String CLUSTER = ""; + + private final BeginFW beginRO = new BeginFW(); + private final DataFW dataRO = new DataFW(); + private final EndFW endRO = new EndFW(); + private final AbortFW abortRO = new AbortFW(); + private final SignalFW signalRO = new SignalFW(); + private final ResetFW resetRO = new ResetFW(); + private final WindowFW windowRO = new WindowFW(); + private final ProxyBeginExFW proxyBeginExRO = new ProxyBeginExFW(); + private final ResponseHeaderFW responseHeaderRO = new ResponseHeaderFW(); + + private final ProxyBeginExFW.Builder proxyBeginExRW = new ProxyBeginExFW.Builder(); + private final BeginFW.Builder beginRW = new BeginFW.Builder(); + private final DataFW.Builder dataRW = new DataFW.Builder(); + private final 
EndFW.Builder endRW = new EndFW.Builder(); + private final AbortFW.Builder abortRW = new AbortFW.Builder(); + private final SignalFW.Builder signalRW = new SignalFW.Builder(); + private final ResetFW.Builder resetRW = new ResetFW.Builder(); + private final WindowFW.Builder windowRW = new WindowFW.Builder(); + + private final RequestHeaderFW.Builder requestHeaderRW = new RequestHeaderFW.Builder(); + + private final RequestHeaderFW requestHeaderRO = new RequestHeaderFW(); + + private final MergedBudgetCreditor creditor; + private final int proxyTypeId; + private final MutableDirectBuffer writeBuffer; + private final MutableDirectBuffer encodeBuffer; + private final KafkaClientSignaler signaler; + private final BindingHandler streamFactory; + private final LongUnaryOperator supplyInitialId; + private final LongUnaryOperator supplyReplyId; + private final LongSupplier supplyTraceId; + private final Object2ObjectHashMap connectionPool; + private final Long2ObjectHashMap streamsByInitialIds; + + public KafkaClientConnectionPool( + KafkaConfiguration config, + EngineContext context, + MergedBudgetCreditor creditor) + { + this.proxyTypeId = context.supplyTypeId("proxy"); + this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.encodeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); + this.signaler = new KafkaClientSignaler(context.signaler()); + this.streamFactory = context.streamFactory(); + this.supplyInitialId = context::supplyInitialId; + this.supplyReplyId = context::supplyReplyId; + this.supplyTraceId = context::supplyTraceId; + this.creditor = creditor; + this.connectionPool = new Object2ObjectHashMap(); + this.streamsByInitialIds = new Long2ObjectHashMap<>(); + } + + private MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer sender) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + 
final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long authorization = begin.authorization(); + + assert (initialId & 0x0000_0000_0000_0001L) != 0L; + + final OctetsFW extension = begin.extension(); + final ProxyBeginExFW proxyBeginEx = extension.get(proxyBeginExRO::tryWrap); + + MessageConsumer newStream = null; + String address = CLUSTER; + + if (proxyBeginEx != null) + { + final ProxyAddressInetFW inet = proxyBeginEx.address().inet(); + String host = inet.destination().asString(); + int port = inet.destinationPort(); + address = String.format("%s:%d", host, port); + } + + final KafkaClientConnection connection = connectionPool.computeIfAbsent(address, s -> + newConnection(originId, routedId, authorization)); + newStream = connection.newStream(msgTypeId, buffer, index, length, sender); + + return newStream; + } + + private KafkaClientConnection newConnection( + long originId, + long routedId, + long authorization) + { + return new KafkaClientConnection(originId, routedId, authorization); + } + + private MessageConsumer newNetworkStream( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + final MessageConsumer receiver = + streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + + return receiver; + } + + private void doBegin( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + 
long acknowledge, + int maximum, + long traceId, + long authorization, + long affinity, + Consumer extension) + { + final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .affinity(affinity) + .extension(extension) + .build(); + + receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + int flags, + long budgetId, + int reserved, + OctetsFW payload, + Flyweight extension) + { + final DataFW frame = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(frame.typeId(), frame.buffer(), frame.offset(), frame.sizeof()); + } + + private void doData( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + int flags, + long budgetId, + int reserved, + DirectBuffer payload, + int offset, + int length, + Flyweight extension) + { + final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .flags(flags) + .budgetId(budgetId) + .reserved(reserved) + .payload(payload, offset, 
length) + .extension(extension.buffer(), extension.offset(), extension.sizeof()) + .build(); + + receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof()); + } + + private void doEnd( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof()); + } + + private void doAbort( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + Consumer extension) + { + final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .extension(extension) + .build(); + + receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof()); + } + + private void doSignal( + MessageConsumer receiver, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + int signalId) + { + final SignalFW signal = signalRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(0) + .traceId(traceId) + .cancelId(0) + .signalId(signalId) + .contextId(0) + .build(); + + receiver.accept(signal.typeId(), signal.buffer(), signal.offset(), signal.sizeof()); + } + + private void doWindow( + 
MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization, + long budgetId, + int padding) + { + final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .budgetId(budgetId) + .padding(padding) + .build(); + + sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof()); + } + + private void doReset( + MessageConsumer sender, + long originId, + long routedId, + long streamId, + long sequence, + long acknowledge, + int maximum, + long traceId, + long authorization) + { + final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity()) + .originId(originId) + .routedId(routedId) + .streamId(streamId) + .sequence(sequence) + .acknowledge(acknowledge) + .maximum(maximum) + .traceId(traceId) + .authorization(authorization) + .build(); + + sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof()); + } + + public BindingHandler streamFactory() + { + return this::newStream; + } + + public class KafkaClientSignaler implements Signaler + { + private final Signaler delegate; + + public KafkaClientSignaler( + Signaler delegate) + { + + this.delegate = delegate; + } + + @Override + public long signalAt( + long timeMillis, + int signalId, + IntConsumer handler) + { + return delegate.signalAt(timeMillis, signalId, handler); + } + + @Override + public void signalNow( + long originId, + long routedId, + long streamId, + int signalId, + int contextId) + { + assert contextId == 0; + + KafkaClientStream stream = streamsByInitialIds.get(streamId); + stream.doStreamSignalNow(signalId); + } + + @Override + public long signalAt( + long timeMillis, + long originId, + long routedId, + long streamId, + int signalId, + int 
contextId) + { + assert contextId == 0; + + KafkaClientStream stream = streamsByInitialIds.get(streamId); + return stream.doStreamSignalAt(timeMillis, signalId); + } + + @Override + public long signalTask( + Runnable task, + long originId, + long routedId, + long streamId, + int signalId, + int contextId) + { + return 0; + } + + @Override + public boolean cancel( + long cancelId) + { + return delegate.cancel(cancelId); + } + } + + public Signaler signaler() + { + return signaler; + } + + final class KafkaClientStream + { + private final KafkaClientConnection connection; + private final long originId; + private final long routedId; + private final long authorization; + private final MessageConsumer sender; + + private final long initialId; + private final long replyId; + private long initialSeq; + private long initialAck; + private int initialMax; + private int initialPad; + private long initialBud; + private long initialSeqDelta = NO_DELTA; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + private long replyBud; + + private int nextRequestId; + private int nexResponseId; + private int requestBytes; + private int responseBytes; + + private int state; + + + private KafkaClientStream( + KafkaClientConnection connection, + MessageConsumer sender, + long originId, + long routedId, + long initialId, + long authorization) + { + this.connection = connection; + this.sender = sender; + this.originId = originId; + this.routedId = routedId; + this.initialId = initialId; + this.replyId = supplyReplyId.applyAsLong(initialId); + this.authorization = authorization; + } + + private void onStreamMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onStreamBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onStreamData(data); 
+ break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onStreamEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onStreamAbort(abort); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onStreamWindow(window); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onStreamReset(reset); + break; + default: + break; + } + } + + private void onStreamBegin( + BeginFW begin) + { + final long initialId = begin.streamId(); + final long traceId = begin.traceId(); + + assert (initialId & 0x0000_0000_0000_0001L) != 0L; + + final OctetsFW extension = begin.extension(); + final ProxyBeginExFW proxyBeginEx = extension.get(proxyBeginExRO::tryWrap); + + state = KafkaState.openingInitial(state); + + String host = null; + int port = 0; + + if (proxyBeginEx != null) + { + final ProxyAddressInetFW inet = proxyBeginEx.address().inet(); + host = inet.destination().asString(); + port = inet.destinationPort(); + } + + connection.doConnectionBegin(traceId, host, port); + + connection.doConnectionSignalNow(initialId, SIGNAL_STREAM_REPLY_BEGIN); + } + + private void onStreamData( + DataFW data) + { + final long initialId = data.streamId(); + final long traceId = data.traceId(); + final long authorization = data.authorization(); + final long budgetId = data.budgetId(); + final int reserved = data.reserved(); + final int flags = data.flags(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + initialSeqDelta = connection.initialSeq; + + if (requestBytes == 0) + { + nextRequestId++; + + final DirectBuffer buffer = payload.buffer(); + final int offset = payload.offset(); + final int limit = payload.limit(); + + RequestHeaderFW requestHeader = requestHeaderRO.wrap(buffer, offset, limit); + requestBytes = requestHeader.length() + 
KAFKA_FRAME_LENGTH_FIELD_OFFSET; + } + + requestBytes -= payload.sizeof(); + connection.doConnectionData(initialId, traceId, authorization, budgetId, + flags, reserved, payload, extension); + assert requestBytes >= 0; + } + + private void onStreamEnd( + EndFW end) + { + state = KafkaState.closedInitial(state); + + connection.doConnectionSignalNow(initialId, SIGNAL_STREAM_REPLY_END); + } + + private void onStreamAbort( + AbortFW abort) + { + state = KafkaState.closedInitial(state); + + connection.doConnectionSignalNow(initialId, SIGNAL_STREAM_REPLY_ABORT); + } + + private void onStreamReset( + ResetFW reset) + { + state = KafkaState.closingReply(state); + + connection.doConnectionSignalNow(initialId, SIGNAL_STREAM_INITIAL_RESET); + } + + private void onStreamWindow( + WindowFW window) + { + final long acknowledge = window.acknowledge(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int replyMax = window.maximum(); + + assert replyAck <= replySeq; + + state = KafkaState.openedReply(state); + + connection.doConnectionWindow(traceId, acknowledge, budgetId, padding, replyMax); + } + + private void doStreamWindow( + long authorization, + long traceId, + long budgetId, + int padding) + { + initialSeq = connection.initialSeq - initialSeqDelta; + initialAck = connection.initialAck - initialSeqDelta; + initialMax = connection.initialMax; + + doWindow(sender, originId, routedId, initialId, 0, 0, initialMax, + traceId, authorization, budgetId, padding); + } + + private void doStreamBegin( + long traceId) + { + state = KafkaState.openingReply(state); + + doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, initialBud, EMPTY_EXTENSION); + + doStreamWindow(connection.authorization, traceId, connection.connectionInitialBudgetId, + connection.initialPad); + } + + private void doStreamData( + long traceId, + int flags, + long sequence, + long 
acknowledge, + int reserved, + DirectBuffer payload, + int offset, + int length, + Flyweight extension) + { + replySeq = sequence; + replyAck = acknowledge; + + if (responseBytes == 0) + { + nexResponseId++; + final ResponseHeaderFW responseHeader = responseHeaderRO.wrap(payload, offset, offset + length); + responseBytes = responseHeader.length() + KAFKA_FRAME_LENGTH_FIELD_OFFSET; + } + + responseBytes -= length; + + if (!KafkaState.replyClosing(state)) + { + doData(sender, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization, flags, replyBud, reserved, payload, offset, length, extension); + } + else + { + if (responseBytes == 0) + { + doStreamEnd(traceId); + } + } + } + + private void doStreamEnd( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + state = KafkaState.closingReply(state); + if (nextRequestId == nexResponseId) + { + state = KafkaState.closedReply(state); + + doEnd(sender, originId, routedId, replyId, 0, 0, 0, + traceId, authorization, EMPTY_EXTENSION); + + streamsByInitialIds.remove(initialId); + } + } + } + + private void doStreamAbort( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + state = KafkaState.closingReply(state); + + if (nextRequestId == nexResponseId) + { + state = KafkaState.closedReply(state); + + doAbort(sender, originId, routedId, replyId, 0, 0, 0, + traceId, authorization, EMPTY_EXTENSION); + + streamsByInitialIds.remove(initialId); + } + } + } + + private void doStreamReset( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + state = KafkaState.closedInitial(state); + + doReset(sender, originId, routedId, initialId, 0, 0, 0, + traceId, authorization); + + streamsByInitialIds.remove(initialId); + } + } + + private void cleanupStream( + long traceId) + { + doStreamReset(traceId); + doStreamAbort(traceId); + + streamsByInitialIds.remove(initialId); + } + + private void doStreamSignalNow( + int signalId) + { + connection.doConnectionSignalNow(initialId, signalId); 
+ } + + private long doStreamSignalAt( + long timeMillis, + int signalId) + { + return connection.doConnectionSignalAt(initialId, timeMillis, signalId); + } + + private void onSignal( + SignalFW signal) + { + final long traceId = signal.traceId(); + final int signalId = signal.signalId(); + + switch (signalId) + { + case SIGNAL_STREAM_REPLY_BEGIN: + doStreamBegin(traceId); + break; + case SIGNAL_STREAM_REPLY_END: + doStreamEnd(traceId); + break; + case SIGNAL_STREAM_REPLY_ABORT: + doStreamAbort(traceId); + break; + case SIGNAL_STREAM_INITIAL_RESET: + doStreamReset(traceId); + break; + default: + doSignal(sender, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, signalId); + } + } + } + + final class KafkaClientConnection implements BindingHandler + { + private final long originId; + private final long routedId; + private final long authorization; + private final LongArrayQueue correlations; + private final Long2LongHashMap signalerCorrelations; + + private long initialId; + private long replyId; + private MessageConsumer receiver; + + private int state; + + private long initialSeq; + private long initialAck; + private int initialMax; + private int initialMin; + private int initialPad; + private long initialBud; + + private long replySeq; + private long replyAck; + private int replyMax; + private int replyPad; + + private int nextRequestId; + private int nextContextId; + private long connectionInitialBudgetId = NO_BUDGET_ID; + private long reconnectAt = NO_CANCEL_ID; + private int requestBytes; + private int responseBytes; + + private KafkaClientConnection( + long originId, + long routedId, + long authorization) + { + this.originId = originId; + this.routedId = routedId; + this.authorization = authorization; + this.correlations = new LongArrayQueue(); + this.signalerCorrelations = new Long2LongHashMap(-1L); + } + + private void doConnectionBegin( + long traceId, + String host, + int port) + { + if (KafkaState.closed(state)) + { + state = 0; 
+ } + + if (!KafkaState.initialOpening(state)) + { + assert state == 0; + + this.initialId = supplyInitialId.applyAsLong(routedId); + this.replyId = supplyReplyId.applyAsLong(initialId); + + Consumer extension = EMPTY_EXTENSION; + + state = KafkaState.openingInitial(state); + + if (host != null) + { + extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l) + .typeId(proxyTypeId) + .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM)) + .source("0.0.0.0") + .destination(host) + .sourcePort(0) + .destinationPort(port))) + .build() + .sizeof()); + } + + this.receiver = newNetworkStream(this::onConnectionMessage, + originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, 0L, extension); + } + } + + private void doConnectionData( + long connectionInitialId, + long traceId, + long authorization, + long budgetId, + int flags, + int reserved, + OctetsFW payload, + Flyweight extension) + { + if (requestBytes == 0) + { + final int requestId = nextRequestId++; + correlations.add(connectionInitialId); + + final DirectBuffer buffer = payload.buffer(); + final int offset = payload.offset(); + final int limit = payload.limit(); + + RequestHeaderFW requestHeader = requestHeaderRO.wrap(buffer, offset, limit); + requestBytes = requestHeader.length() + KAFKA_FRAME_LENGTH_FIELD_OFFSET; + + int progress = 0; + RequestHeaderFW newRequestHeader = requestHeaderRW.wrap(encodeBuffer, 0, encodeBuffer.capacity()) + .length(requestHeader.length()) + .apiKey(requestHeader.apiKey()) + .apiVersion(requestHeader.apiVersion()) + .correlationId(requestId) + .clientId(requestHeader.clientId()) + .build(); + progress = newRequestHeader.limit(); + + final int remaining = payload.sizeof() - progress; + encodeBuffer.putBytes(progress, buffer, requestHeader.limit(), remaining); + + final int length = progress + remaining; + doData(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, flags, budgetId, reserved, 
encodeBuffer, 0, length, extension); + + requestBytes -= length; + assert requestBytes >= 0; + } + else + { + doData(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, flags, budgetId, reserved, payload, extension); + requestBytes -= payload.sizeof(); + } + + initialSeq += reserved; + + assert initialSeq <= initialAck + initialMax; + } + + private void doConnectionEnd( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doEnd(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void doConnectionAbort( + long traceId) + { + if (!KafkaState.initialClosed(state)) + { + doAbort(receiver, originId, routedId, initialId, initialSeq, initialAck, initialMax, + traceId, authorization, EMPTY_EXTENSION); + + state = KafkaState.closedInitial(state); + } + } + + private void doConnectionSignalNow( + long streamId, + int signalId) + { + nextContextId++; + signalerCorrelations.put(nextContextId, streamId); + signaler.delegate.signalNow(originId, routedId, this.initialId, signalId, nextContextId); + } + + private long doConnectionSignalAt( + long streamId, + long timeMillis, + int signalId) + { + nextContextId++; + signalerCorrelations.put(nextContextId, streamId); + return signaler.delegate.signalAt( + timeMillis, originId, routedId, this.initialId, signalId, nextContextId); + } + + private void doConnectionReset( + long traceId) + { + if (!KafkaState.replyClosed(state)) + { + doReset(receiver, originId, routedId, replyId, replySeq, replyAck, replyMax, + traceId, authorization); + + state = KafkaState.closedReply(state); + } + } + + private void doConnectionWindow( + long traceId, + long authorization, + long budgetId, + int padding, + int replyMax) + { + replyAck = Math.max(replyAck - replyPad, 0); + this.replyMax = replyMax; + + doWindow(receiver, originId, routedId, replyId, 
replySeq, replyAck, this.replyMax, + traceId, authorization, budgetId, padding + replyPad); + } + + private void onConnectionMessage( + int msgTypeId, + DirectBuffer buffer, + int index, + int length) + { + switch (msgTypeId) + { + case BeginFW.TYPE_ID: + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + onConnectionBegin(begin); + break; + case DataFW.TYPE_ID: + final DataFW data = dataRO.wrap(buffer, index, index + length); + onConnectionData(data); + break; + case EndFW.TYPE_ID: + final EndFW end = endRO.wrap(buffer, index, index + length); + onConnectionEnd(end); + break; + case AbortFW.TYPE_ID: + final AbortFW abort = abortRO.wrap(buffer, index, index + length); + onConnectionAbort(abort); + break; + case SignalFW.TYPE_ID: + final SignalFW signal = signalRO.wrap(buffer, index, index + length); + onConnectionSignal(signal); + break; + case ResetFW.TYPE_ID: + final ResetFW reset = resetRO.wrap(buffer, index, index + length); + onConnectionReset(reset); + break; + case WindowFW.TYPE_ID: + final WindowFW window = windowRO.wrap(buffer, index, index + length); + onConnectionWindow(window); + break; + default: + break; + } + } + + private void onConnectionBegin( + BeginFW begin) + { + final long authorization = begin.authorization(); + final long traceId = begin.traceId(); + + state = KafkaState.openingReply(state); + + doConnectionWindow(traceId, authorization, 0, replyPad, replyMax); + } + + private void onConnectionData( + DataFW data) + { + final long sequence = data.sequence(); + final long acknowledge = data.acknowledge(); + final long traceId = data.traceId(); + final int flags = data.flags(); + final int reserved = data.reserved(); + final OctetsFW payload = data.payload(); + final OctetsFW extension = data.extension(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence + reserved; + + assert replyAck <= replySeq; + assert replySeq <= replyAck + replyMax; + + final DirectBuffer buffer = 
payload.buffer(); + final int limit = payload.limit(); + int progress = payload.offset(); + + while (progress < limit) + { + final int beforeResponseBytes = responseBytes; + if (responseBytes == 0) + { + final ResponseHeaderFW responseHeader = responseHeaderRO.wrap(buffer, progress, limit); + responseBytes = responseHeader.length() + KAFKA_FRAME_LENGTH_FIELD_OFFSET; + } + + final int responseBytesMin = Math.min(responseBytes, payload.sizeof()); + responseBytes -= responseBytesMin; + assert responseBytes >= 0; + + long initialId = correlations.peekLong(); + + KafkaClientStream stream = streamsByInitialIds.get(initialId); + + stream.doStreamData(traceId, flags | FLAG_INIT | FLAG_FIN, sequence, + acknowledge, reserved, buffer, progress, responseBytesMin, extension); + + progress += responseBytesMin; + + if (responseBytes == 0) + { + correlations.remove(); + } + } + } + + private void onConnectionEnd( + EndFW end) + { + final long sequence = end.sequence(); + final long acknowledge = end.acknowledge(); + final long traceId = end.traceId(); + + assert acknowledge <= sequence; + assert sequence >= replySeq; + + replySeq = sequence; + state = KafkaState.closedReply(state); + + assert replyAck <= replySeq; + + doConnectionEnd(traceId); + + streamsByInitialIds.forEach((k, v) -> v.cleanupStream(traceId)); + } + + private void onConnectionAbort( + AbortFW abort) + { + final long traceId = abort.traceId(); + + doConnectionAbort(traceId); + + streamsByInitialIds.forEach((k, v) -> v.cleanupStream(traceId)); + } + + private void onConnectionSignal( + SignalFW signal) + { + final int signalId = signal.signalId(); + final int contextId = signal.contextId(); + + if (signalId == SIGNAL_CONNECTION_CLEANUP) + { + doSignalStreamCleanup(); + } + else + { + long initialId = signalerCorrelations.remove(contextId); + KafkaClientStream stream = streamsByInitialIds.get(initialId); + + if (stream != null) + { + stream.onSignal(signal); + } + } + } + + private void onConnectionReset( + ResetFW 
reset) + { + final long traceId = reset.traceId(); + + doConnectionReset(traceId); + + streamsByInitialIds.forEach((k, v) -> v.cleanupStream(traceId)); + } + + private void onConnectionWindow( + WindowFW window) + { + final long sequence = window.sequence(); + final long acknowledge = window.acknowledge(); + final int maximum = window.maximum(); + final long traceId = window.traceId(); + final long budgetId = window.budgetId(); + final int padding = window.padding(); + final int minimum = window.minimum(); + + assert acknowledge <= sequence; + assert sequence <= initialSeq; + assert acknowledge >= initialAck; + assert maximum >= initialMax; + + final int credit = (int)(acknowledge - initialAck) + (maximum - initialMax); + assert credit >= 0; + + this.initialAck = acknowledge; + this.initialMax = maximum; + this.initialMin = minimum; + this.initialPad = padding; + this.initialBud = budgetId; + + assert replyAck <= replySeq; + + if (KafkaState.replyOpening(state)) + { + state = KafkaState.openedReply(state); + if (connectionInitialBudgetId == NO_BUDGET_ID) + { + connectionInitialBudgetId = creditor.acquire(initialId, budgetId); + } + } + + if (connectionInitialBudgetId != NO_BUDGET_ID) + { + creditor.credit(traceId, connectionInitialBudgetId, credit); + } + + streamsByInitialIds.forEach((k, v) -> + v.doStreamWindow(authorization, traceId, connectionInitialBudgetId, initialPad)); + } + + private void doSignalStreamCleanup() + { + this.reconnectAt = signaler.delegate.signalAt( + currentTimeMillis() + 4000, + SIGNAL_CONNECTION_CLEANUP, + this::onStreamCleanupSignal); + } + + + private void onStreamCleanupSignal( + int signalId) + { + assert signalId == SIGNAL_CONNECTION_CLEANUP; + + if (streamsByInitialIds.isEmpty()) + { + final long traceId = supplyTraceId.getAsLong(); + cleanupConnection(traceId); + correlations.clear(); + } + } + + private void cleanupConnection( + long traceId) + { + doConnectionAbort(traceId); + doConnectionReset(traceId); + + 
cleanupBudgetCreditorIfNecessary(); + } + + private void cleanupBudgetCreditorIfNecessary() + { + if (connectionInitialBudgetId != NO_CREDITOR_INDEX) + { + creditor.release(connectionInitialBudgetId); + connectionInitialBudgetId = NO_CREDITOR_INDEX; + } + } + + @Override + public MessageConsumer newStream( + int msgTypeId, + DirectBuffer buffer, + int index, + int length, + MessageConsumer sender) + { + final BeginFW begin = beginRO.wrap(buffer, index, index + length); + final long originId = begin.originId(); + final long routedId = begin.routedId(); + final long initialId = begin.streamId(); + final long authorization = begin.authorization(); + + KafkaClientStream stream = new KafkaClientStream(this, sender, originId, routedId, initialId, authorization); + streamsByInitialIds.put(initialId, stream); + + return stream::onStreamMessage; + } + } +} diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java index 99f87b14b0..75c397c8b0 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientFactory.java @@ -32,6 +32,7 @@ import io.aklivity.zilla.runtime.engine.EngineContext; import io.aklivity.zilla.runtime.engine.binding.BindingHandler; import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer; +import io.aklivity.zilla.runtime.engine.concurrent.Signaler; import io.aklivity.zilla.runtime.engine.config.BindingConfig; public final class KafkaClientFactory implements KafkaStreamFactory @@ -52,6 +53,15 @@ public KafkaClientFactory( final Long2ObjectHashMap bindings = new Long2ObjectHashMap<>(); final KafkaMergedBudgetAccountant accountant = new KafkaMergedBudgetAccountant(context); + final 
KafkaClientConnectionPool connectionPool = new KafkaClientConnectionPool( + config, context, accountant.creditor()); + + final BindingHandler streamFactory = config.clientConnectionPool() ? connectionPool.streamFactory() : + context.streamFactory(); + + final Signaler signaler = config.clientConnectionPool() ? connectionPool.signaler() : + context.signaler(); + final KafkaClientMetaFactory clientMetaFactory = new KafkaClientMetaFactory( config, context, bindings::get, accountant::supplyDebitor, supplyClientRoute); @@ -59,7 +69,7 @@ public KafkaClientFactory( config, context, bindings::get, accountant::supplyDebitor); final KafkaClientGroupFactory clientGroupFactory = new KafkaClientGroupFactory( - config, context, bindings::get, accountant::supplyDebitor); + config, context, bindings::get, accountant::supplyDebitor, signaler, streamFactory); final KafkaClientFetchFactory clientFetchFactory = new KafkaClientFetchFactory( config, context, bindings::get, accountant::supplyDebitor, supplyClientRoute); diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java index 34c1aacdd8..6b833a0b7e 100644 --- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java +++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java @@ -16,6 +16,9 @@ package io.aklivity.zilla.runtime.binding.kafka.internal.stream; import static io.aklivity.zilla.runtime.binding.kafka.internal.types.ProxyAddressProtocol.STREAM; +import static io.aklivity.zilla.runtime.engine.budget.BudgetCreditor.NO_BUDGET_ID; +import static io.aklivity.zilla.runtime.engine.budget.BudgetCreditor.NO_CREDITOR_INDEX; +import static 
io.aklivity.zilla.runtime.engine.budget.BudgetDebitor.NO_DEBITOR_INDEX; import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT; import static io.aklivity.zilla.runtime.engine.concurrent.Signaler.NO_CANCEL_ID; import static java.lang.System.currentTimeMillis; @@ -265,6 +268,7 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp private final BindingHandler streamFactory; private final LongFunction supplyBinding; private final Supplier supplyInstanceId; + private final LongFunction supplyDebitor; private final Long2ObjectHashMap instanceIds; private final Object2ObjectHashMap groupStreams; private final String clientId; @@ -275,13 +279,15 @@ public KafkaClientGroupFactory( KafkaConfiguration config, EngineContext context, LongFunction supplyBinding, - LongFunction supplyDebitor) + LongFunction supplyDebitor, + Signaler signaler, + BindingHandler streamFactory) { super(config, context); this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME); this.proxyTypeId = context.supplyTypeId("proxy"); - this.signaler = context.signaler(); - this.streamFactory = context.streamFactory(); + this.signaler = signaler; + this.streamFactory = streamFactory; this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]); this.decodePool = context.bufferPool(); @@ -290,6 +296,7 @@ public KafkaClientGroupFactory( this.rebalanceTimeout = config.clientGroupRebalanceTimeout(); this.clientId = config.clientId(); this.supplyInstanceId = config.clientInstanceIdSupplier(); + this.supplyDebitor = supplyDebitor; this.instanceIds = new Long2ObjectHashMap<>(); this.groupStreams = new Object2ObjectHashMap<>(); } @@ -307,7 +314,6 @@ public MessageConsumer newStream( final long routedId = begin.routedId(); final long initialId = begin.streamId(); final long affinity = begin.affinity(); - final long traceId = begin.traceId(); final long authorization = 
begin.authorization(); final OctetsFW extension = begin.extension(); final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()); @@ -802,7 +808,7 @@ private int decodeReject( int progress, int limit) { - client.doNetworkResetIfNecessary(traceId); + client.doNetworkReset(traceId); client.decoder = decodeIgnoreAll; return limit; } @@ -1691,12 +1697,15 @@ private final class ClusterClient extends KafkaSaslClient private long initialSeq; private long initialAck; + private int initialMin; private int initialMax; private int initialPad; - private long initialBudgetId; + private long initialBudgetId = NO_BUDGET_ID; + private long initialDebIndex = NO_DEBITOR_INDEX; private long replySeq; private long replyAck; + private long replyBud; private int replyMax; private int encodeSlot = NO_SLOT; @@ -1709,6 +1718,7 @@ private final class ClusterClient extends KafkaSaslClient private int nextResponseId; + private BudgetDebitor initialDeb; private KafkaGroupClusterClientDecoder decoder; private LongLongConsumer encoder; @@ -1790,6 +1800,7 @@ private void onNetworkData( replySeq = sequence + data.reserved(); authorization = data.authorization(); + replyBud = budgetId; assert replyAck <= replySeq; @@ -1862,6 +1873,7 @@ private void onNetworkWindow( { final long sequence = window.sequence(); final long acknowledge = window.acknowledge(); + final int minimum = window.minimum(); final int maximum = window.maximum(); final long traceId = window.traceId(); final long budgetId = window.budgetId(); @@ -1875,6 +1887,7 @@ private void onNetworkWindow( this.initialAck = acknowledge; this.initialMax = maximum; this.initialPad = padding; + this.initialMin = minimum; this.initialBudgetId = budgetId; assert initialAck <= initialSeq; @@ -1883,15 +1896,28 @@ private void onNetworkWindow( state = KafkaState.openedInitial(state); + if (initialBudgetId != NO_BUDGET_ID && initialDebIndex == NO_DEBITOR_INDEX) + { + initialDeb = 
supplyDebitor.apply(initialBudgetId); + initialDebIndex = initialDeb.acquire(initialBudgetId, initialId, this::doNetworkDataIfNecessary); + assert initialDebIndex != NO_DEBITOR_INDEX; + } + + doNetworkDataIfNecessary(budgetId); + + doEncodeRequestIfNecessary(traceId, budgetId); + } + + private void doNetworkDataIfNecessary( + long traceId) + { if (encodeSlot != NO_SLOT) { final MutableDirectBuffer buffer = encodePool.buffer(encodeSlot); final int limit = encodeSlotOffset; - encodeNetwork(encodeSlotTraceId, authorization, budgetId, buffer, 0, limit); + encodeNetwork(traceId, authorization, initialBudgetId, buffer, 0, limit); } - - doEncodeRequestIfNecessary(traceId, budgetId); } private void onNetworkSignal( @@ -2089,13 +2115,32 @@ private void encodeNetwork( int offset, int limit) { - final int maxLength = limit - offset; - final int initialWin = initialMax - (int)(initialSeq - initialAck); - final int length = Math.max(Math.min(initialWin - initialPad, maxLength), 0); + final int length = limit - offset; + final int lengthMin = Math.min(length, 1024); + final int initialBudget = Math.max(initialMax - (int)(initialSeq - initialAck), 0); + final int reservedMax = Math.max(Math.min(length + initialPad, initialBudget), initialMin); + final int reservedMin = Math.max(Math.min(lengthMin + initialPad, reservedMax), initialMin); - if (length > 0) + int reserved = reservedMax; + + flush: + if (reserved > 0) { - final int reserved = length + initialPad; + + boolean claimed = false; + + if (initialDebIndex != NO_DEBITOR_INDEX) + { + final int lengthMax = Math.min(reserved - initialPad, length); + final int deferredMax = length - lengthMax; + reserved = initialDeb.claim(traceId, initialDebIndex, initialId, reservedMin, reserved, deferredMax); + claimed = reserved > 0; + } + + if (reserved < initialPad || reserved == initialPad && length > 0) + { + break flush; + } doData(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization, 
budgetId, reserved, buffer, offset, length, EMPTY_EXTENSION); @@ -2105,7 +2150,8 @@ private void encodeNetwork( assert initialAck <= initialSeq; } - final int remaining = maxLength - length; + final int flushed = Math.max(reserved - initialPad, 0); + final int remaining = length - flushed; if (remaining > 0) { if (encodeSlot == NO_SLOT) @@ -2120,7 +2166,7 @@ private void encodeNetwork( else { final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); - encodeBuffer.putBytes(0, buffer, offset + length, remaining); + encodeBuffer.putBytes(0, buffer, offset + flushed, remaining); encodeSlotOffset = remaining; } } @@ -2353,9 +2399,11 @@ private final class DescribeClient extends KafkaSaslClient private long initialSeq; private long initialAck; + private int initialMin; private int initialMax; private int initialPad; - private long initialBudgetId; + private long initialBudgetId = NO_BUDGET_ID; + private long initialDebIndex = NO_CREDITOR_INDEX; private long replySeq; private long replyAck; @@ -2373,6 +2421,7 @@ private final class DescribeClient extends KafkaSaslClient private KafkaDescribeClientDecoder decoder; private LongLongConsumer encoder; + private BudgetDebitor initialDeb; DescribeClient( long originId, @@ -2550,6 +2599,7 @@ private void onNetworkWindow( { final long sequence = window.sequence(); final long acknowledge = window.acknowledge(); + final int minimum = window.minimum(); final int maximum = window.maximum(); final long traceId = window.traceId(); final long budgetId = window.budgetId(); @@ -2563,6 +2613,7 @@ private void onNetworkWindow( this.initialAck = acknowledge; this.initialMax = maximum; this.initialPad = padding; + this.initialMin = minimum; this.initialBudgetId = budgetId; assert initialAck <= initialSeq; @@ -2571,6 +2622,21 @@ private void onNetworkWindow( state = KafkaState.openedInitial(state); + if (initialBudgetId != NO_BUDGET_ID && initialDebIndex == NO_DEBITOR_INDEX) + { + initialDeb = 
supplyDebitor.apply(initialBudgetId); + initialDebIndex = initialDeb.acquire(initialBudgetId, initialId, this::doNetworkDataIfNecessary); + assert initialDebIndex != NO_DEBITOR_INDEX; + } + + doNetworkDataIfNecessary(budgetId); + + doEncodeRequestIfNecessary(traceId, budgetId); + } + + private void doNetworkDataIfNecessary( + long budgetId) + { if (encodeSlot != NO_SLOT) { final MutableDirectBuffer buffer = encodePool.buffer(encodeSlot); @@ -2578,8 +2644,6 @@ private void onNetworkWindow( encodeNetwork(encodeSlotTraceId, authorization, budgetId, buffer, 0, limit); } - - doEncodeRequestIfNecessary(traceId, budgetId); } private void onNetworkSignal( @@ -2650,7 +2714,7 @@ private void doNetworkEnd( traceId, authorization, EMPTY_EXTENSION); } - private void doNetworkAbortIfNecessary( + private void doNetworkAbort( long traceId) { if (!KafkaState.initialClosed(state)) @@ -2663,7 +2727,7 @@ private void doNetworkAbortIfNecessary( cleanupEncodeSlotIfNecessary(); } - private void doNetworkResetIfNecessary( + private void doNetworkReset( long traceId) { if (!KafkaState.replyClosed(state)) @@ -2782,13 +2846,32 @@ private void encodeNetwork( int offset, int limit) { - final int maxLength = limit - offset; - final int initialWin = initialMax - (int)(initialSeq - initialAck); - final int length = Math.max(Math.min(initialWin - initialPad, maxLength), 0); + final int length = limit - offset; + final int lengthMin = Math.min(length, 1024); + final int initialBudget = Math.max(initialMax - (int)(initialSeq - initialAck), 0); + final int reservedMax = Math.max(Math.min(length + initialPad, initialBudget), initialMin); + final int reservedMin = Math.max(Math.min(lengthMin + initialPad, reservedMax), initialMin); - if (length > 0) + int reserved = reservedMax; + + flush: + if (reserved > 0) { - final int reserved = length + initialPad; + + boolean claimed = false; + + if (initialDebIndex != NO_DEBITOR_INDEX) + { + final int lengthMax = Math.min(reserved - initialPad, length); + final 
int deferredMax = length - lengthMax; + reserved = initialDeb.claim(traceId, initialDebIndex, initialId, reservedMin, reserved, deferredMax); + claimed = reserved > 0; + } + + if (reserved < initialPad || reserved == initialPad && length > 0) + { + break flush; + } doData(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_EXTENSION); @@ -2798,7 +2881,8 @@ private void encodeNetwork( assert initialAck <= initialSeq; } - final int remaining = maxLength - length; + final int flushed = Math.max(reserved - initialPad, 0); + final int remaining = length - flushed; if (remaining > 0) { if (encodeSlot == NO_SLOT) @@ -2813,7 +2897,7 @@ private void encodeNetwork( else { final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); - encodeBuffer.putBytes(0, buffer, offset + length, remaining); + encodeBuffer.putBytes(0, buffer, offset + flushed, remaining); encodeSlotOffset = remaining; } } @@ -2985,8 +3069,8 @@ else if (delegate.timeout > timeoutMax) private void cleanupNetwork( long traceId) { - doNetworkResetIfNecessary(traceId); - doNetworkAbortIfNecessary(traceId); + doNetworkAbort(traceId); + doNetworkReset(traceId); } private void cleanupDecodeSlotIfNecessary() @@ -3030,9 +3114,11 @@ private final class CoordinatorClient extends KafkaSaslClient private long initialSeq; private long initialAck; + private int initialMin; private int initialMax; private int initialPad; - private long initialBudgetId; + private long initialBudgetId = NO_BUDGET_ID; + private long initialDebIndex = NO_CREDITOR_INDEX; private long replySeq; private long replyAck; @@ -3056,6 +3142,7 @@ private final class CoordinatorClient extends KafkaSaslClient private KafkaGroupCoordinatorClientDecoder decoder; private LongLongConsumer encoder; private OctetsFW assignment = EMPTY_OCTETS; + private BudgetDebitor initialDeb; CoordinatorClient( long originId, @@ -3215,6 +3302,7 @@ private void 
onNetworkWindow( { final long sequence = window.sequence(); final long acknowledge = window.acknowledge(); + final int minimum = window.minimum(); final int maximum = window.maximum(); final long traceId = window.traceId(); final long budgetId = window.budgetId(); @@ -3228,6 +3316,7 @@ private void onNetworkWindow( this.initialAck = acknowledge; this.initialMax = maximum; this.initialPad = padding; + this.initialMin = minimum; this.initialBudgetId = budgetId; assert initialAck <= initialSeq; @@ -3236,6 +3325,20 @@ private void onNetworkWindow( state = KafkaState.openedInitial(state); + if (initialBudgetId != NO_BUDGET_ID && initialDebIndex == NO_DEBITOR_INDEX) + { + initialDeb = supplyDebitor.apply(initialBudgetId); + initialDebIndex = initialDeb.acquire(initialBudgetId, initialId, this::doNetworkDataIfNecessary); + assert initialDebIndex != NO_DEBITOR_INDEX; + } + + doNetworkDataIfNecessary(budgetId); + + doEncodeRequestIfNecessary(traceId, budgetId); + } + + private void doNetworkDataIfNecessary(long budgetId) + { if (encodeSlot != NO_SLOT) { final MutableDirectBuffer buffer = encodePool.buffer(encodeSlot); @@ -3243,8 +3346,6 @@ private void onNetworkWindow( encodeNetwork(encodeSlotTraceId, authorization, budgetId, buffer, 0, limit); } - - doEncodeRequestIfNecessary(traceId, budgetId); } private void onNetworkSignal( @@ -3842,13 +3943,32 @@ private void encodeNetwork( int offset, int limit) { - final int maxLength = limit - offset; - final int initialWin = initialMax - (int)(initialSeq - initialAck); - final int length = Math.max(Math.min(initialWin - initialPad, maxLength), 0); + final int length = limit - offset; + final int lengthMin = Math.min(length, 1024); + final int initialBudget = Math.max(initialMax - (int)(initialSeq - initialAck), 0); + final int reservedMax = Math.max(Math.min(length + initialPad, initialBudget), initialMin); + final int reservedMin = Math.max(Math.min(lengthMin + initialPad, reservedMax), initialMin); - if (length > 0) + int 
reserved = reservedMax; + + flush: + if (reserved > 0) { - final int reserved = length + initialPad; + + boolean claimed = false; + + if (initialDebIndex != NO_DEBITOR_INDEX) + { + final int lengthMax = Math.min(reserved - initialPad, length); + final int deferredMax = length - lengthMax; + reserved = initialDeb.claim(traceId, initialDebIndex, initialId, reservedMin, reserved, deferredMax); + claimed = reserved > 0; + } + + if (reserved < initialPad || reserved == initialPad && length > 0) + { + break flush; + } doData(network, originId, routedId, initialId, initialSeq, initialAck, initialMax, traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_EXTENSION); @@ -3858,7 +3978,8 @@ private void encodeNetwork( assert initialAck <= initialSeq; } - final int remaining = maxLength - length; + final int flushed = Math.max(reserved - initialPad, 0); + final int remaining = length - flushed; if (remaining > 0) { if (encodeSlot == NO_SLOT) @@ -3873,7 +3994,7 @@ private void encodeNetwork( else { final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot); - encodeBuffer.putBytes(0, buffer, offset + length, remaining); + encodeBuffer.putBytes(0, buffer, offset + flushed, remaining); encodeSlotOffset = remaining; } } diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java index 377abe001a..33670fffc3 100644 --- a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java +++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/ClientGroupIT.java @@ -47,7 +47,6 @@ public class ClientGroupIT @Rule public final TestRule chain = outerRule(engine).around(k3po).around(timeout); - @Test @Configuration("client.yaml") @Specification({ diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt index cf59d81824..2576124653 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt @@ -17,6 +17,7 @@ connect "zilla://streams/app0" option zilla:window 8192 option zilla:transmission "half-duplex" + option zilla:budgetId 1 write zilla:begin.ext ${kafka:beginEx() .typeId(zilla:id("kafka")) diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/client.rpt index 1012892d86..e41fd7e95e 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/client.rpt @@ -36,6 +36,3 @@ write 22 # size 4s "test" # "session" coordinator key [0x00] # coordinator group type - -write abort -read aborted diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/server.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/server.rpt index 1db15e2632..92a221ad43 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/client.sent.write.abort.before.coordinator.response/server.rpt @@ -30,6 +30,3 @@ read 22 # size 5s "zilla" # no client id 4s "test" # "session" coordinator key [0x00] # coordinator group type - -read aborted -write abort diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt index 8fd214ccc1..b303cce4f2 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/client.rpt @@ -63,19 +63,6 @@ read 35 # size 9s "localhost" # host 9092 # port -write close -read abort - -read notify ROUTED_CLUSTER_SERVER - -connect await ROUTED_CLUSTER_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 32s # describe configs 0s # v0 @@ -160,7 +147,7 @@ write 129 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata -read 126 # size +read 128 # size (int:newRequestId) 0 # throttle time 0s # no error diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt index 8330081a0e..6a4ca11754 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.not.available/server.rpt @@ -59,13 +59,6 @@ write 35 # size 9s "localhost" # host 9092 # port -read closed -write aborted - -accepted - -connected - read 82 # size 32s # describe configs 0s # v0 @@ -145,7 +138,7 @@ read 129 # size 14 # metadata size [0..14] # metadata -write 126 # size +write 128 # size ${newRequestId} 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt index 1e38de9c9d..13114776fb 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/client.rpt @@ -46,18 +46,6 @@ read 35 # size 9s "localhost" # host 9092 # port -write close -read abort - -read notify ROUTED_CLUSTER_SERVER_FIRST - -connect await ROUTED_CLUSTER_SERVER_FIRST - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission 
"duplex" - option zilla:byteorder "network" - -connected write 82 # size 32s # describe configs @@ -91,55 +79,6 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER - -connect await ROUTED_DESCRIBE_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - -write 119 # size - 11s # join group - 5s # v5 - ${newRequestId} - 5s "zilla" # client id - 4s "test" # consumer group - 30000 # session timeout - 4000 # rebalance timeout - 0s # consumer group member - 42s ${instanceId} # group instance id - 8s "consumer" # protocol type - 1 # group protocol - 10s "highlander" # protocol name - 14 # metadata size - ${kafka:randomBytes(14)} # metadata - -read 24 # size - (int:newRequestId) - 0 # throttle time - 16s # not a coordinator for a consumer - -1 # generated id - 0s # protocol name - 0s # leader id - 0s # not a coordinator for a consumer - 0 # members - -write close -read abort - -read notify ROUTED_BROKER_SERVER_SECOND - -connect await ROUTED_BROKER_SERVER_SECOND - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 22 # size 10s # find coordinator 1s # v1 @@ -157,19 +96,6 @@ read 35 # size 9s "localhost" # host 9092 # port -write close -read abort - -read notify ROUTED_CLUSTER_SERVER_SECOND - -connect await ROUTED_CLUSTER_SERVER_SECOND - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 32s # describe configs 0s # v0 @@ -202,9 +128,9 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER_SECOND +read notify ROUTED_DESCRIBE_SERVER -connect await ROUTED_DESCRIBE_SERVER_SECOND +connect await ROUTED_DESCRIBE_SERVER "zilla://streams/net0" option 
zilla:window ${networkConnectWindow} option zilla:transmission "duplex" @@ -213,6 +139,32 @@ connect await ROUTED_DESCRIBE_SERVER_SECOND connected write 119 # size + 11s # join group + 5s # v5 + ${newRequestId} + 5s "zilla" # client id + 4s "test" # consumer group + 30000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s ${instanceId} # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 14 # metadata size + ${kafka:randomBytes(14)} # metadata + +read 24 # size + (int:newRequestId) + 0 # throttle time + 16s # not a coordinator for a consumer + -1 # generated id + 0s # protocol name + 0s # leader id + 0s # not a coordinator for a consumer + 0 # members + +write 119 # size 11s # join group 5s # v5 ${newRequestId} @@ -228,7 +180,7 @@ write 119 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata -read 126 # size +read 128 # size (int:newRequestId) 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt index 9dfa468847..32bdf749d7 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/coordinator.reject.invalid.consumer/server.rpt @@ -42,13 +42,6 @@ write 35 # size 9s "localhost" # host 9092 # port -read closed -write aborted - -accepted - -connected - read 82 # size 32s # describe configs 0s # v0 @@ -81,42 +74,6 @@ write 103 # size [0x00] # not default [0x00] # not sensitive -accepted - -connected - -read 119 # size - 
11s # join group - 5s # v5 - (int:newRequestId) - 5s "zilla" # client id - 4s "test" # consumer group - 30000 # session timeout - 4000 # rebalance timeout - 0s # consumer group member - 42s [0..42] # group instance id - 8s "consumer" # protocol type - 1 # group protocol - 10s "highlander" # protocol name - 14 # metadata size - [0..14] # metadata - -write 24 # size - ${newRequestId} - 0 # throttle time - 16s # not a coordinator for a consumer - -1 # generated id - 0s # protocol name - 0s # leader id - 0s # not a coordinator for a consumer - 0 # members - -read closed -write aborted - -accepted - -connected read 22 # size 10s # find coordinator @@ -135,12 +92,6 @@ write 35 # size 9s "localhost" # host 9092 # port -read closed -write aborted - -accepted - -connected read 82 # size 32s # describe configs @@ -178,6 +129,32 @@ accepted connected +read 119 # size + 11s # join group + 5s # v5 + (int:newRequestId) + 5s "zilla" # client id + 4s "test" # consumer group + 30000 # session timeout + 4000 # rebalance timeout + 0s # consumer group member + 42s [0..42] # group instance id + 8s "consumer" # protocol type + 1 # group protocol + 10s "highlander" # protocol name + 14 # metadata size + [0..14] # metadata + +write 24 # size + ${newRequestId} + 0 # throttle time + 16s # not a coordinator for a consumer + -1 # generated id + 0s # protocol name + 0s # leader id + 0s # not a coordinator for a consumer + 0 # members + read 119 # size 11s # join group 5s # v5 @@ -194,7 +171,7 @@ read 119 # size 14 # metadata size [0..14] # metadata -write 126 # size +write 128 # size ${newRequestId} 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt index 089a7589ae..1cae9a8eab 
100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/client.rpt @@ -46,19 +46,6 @@ read 35 # size 9s "localhost" # host 9092 # port -write close -read abort - -read notify ROUTED_CLUSTER_SERVER - -connect await ROUTED_CLUSTER_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 32s # describe configs 0s # v0 @@ -91,9 +78,9 @@ read 103 # size [0x00] # not default [0x00] # not sensitive -read notify ROUTED_DESCRIBE_SERVER +read notify ROUTED_CLUSTER_SERVER -connect await ROUTED_DESCRIBE_SERVER +connect await ROUTED_CLUSTER_SERVER "zilla://streams/net0" option zilla:window ${networkConnectWindow} option zilla:transmission "duplex" @@ -143,7 +130,7 @@ write 129 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata -read 126 # size +read 128 # size (int:newRequestId) 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt index b0489f13c4..e0c1a8cfa2 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/ignore.heartbeat.before.handshake/server.rpt @@ -42,13 +42,6 @@ write 35 # size 9s "localhost" # host 9092 # port 
-read closed -write aborted - -accepted - -connected - read 82 # size 32s # describe configs 0s # v0 @@ -128,7 +121,7 @@ read 129 # size 14 # metadata size [0..14] # metadata -write 126 # size +write 128 # size ${newRequestId} 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt index 857f32f290..c31cdb5d8a 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt @@ -46,18 +46,6 @@ read 35 # size 9s "localhost" # host 9092 # port -write close -read abort - -read notify ROUTED_CLUSTER_SERVER_FIRST - -connect await ROUTED_CLUSTER_SERVER_FIRST - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected write 82 # size 32s # describe configs @@ -117,7 +105,7 @@ write 119 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata -read 126 # size +read 128 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -150,7 +138,7 @@ write 129 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata -read 126 # size +read 128 # size (int:newRequestId) 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt index 2201fa8c53..778f9fabf9 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt @@ -42,12 +42,6 @@ write 35 # size 9s "localhost" # host 9092 # port -read closed -write aborted - -accepted - -connected read 82 # size 32s # describe configs @@ -101,7 +95,7 @@ read 119 # size 14 # metadata size [0..14] # metadata -write 126 # size +write 128 # size ${newRequestId} 0 # throttle time 0s # no error @@ -134,7 +128,7 @@ read 129 # size 14 # metadata size [0..14] # metadata -write 126 # size +write 128 # size ${newRequestId} 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt index 59668c7c17..c9ab07627c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/client.rpt @@ -46,19 +46,6 @@ read 35 # size 9s "localhost" # host 9092 # port -write close -read abort - -read notify ROUTED_CLUSTER_SERVER_FIRST - -connect await 
ROUTED_CLUSTER_SERVER_FIRST - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 32s # describe configs 0s # v0 @@ -117,7 +104,7 @@ write 119 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata -read 126 # size +read 128 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -169,7 +156,7 @@ write 129 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata -read 126 # size +read 128 # size (int:newRequestId) 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt index 239ecb7721..c5e9f2aab8 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.migrate.leader/server.rpt @@ -42,13 +42,6 @@ write 35 # size 9s "localhost" # host 9092 # port -read closed -write aborted - -accepted - -connected - read 82 # size 32s # describe configs 0s # v0 @@ -101,7 +94,7 @@ read 119 # size 14 # metadata size [0..14] # metadata -write 126 # size +write 128 # size ${newRequestId} 0 # throttle time 0s # no error @@ -154,7 +147,7 @@ read 129 # size 14 # metadata size [0..14] # metadata -write 126 # size +write 128 # size ${newRequestId} 0 # throttle time 0s # no error diff --git 
a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt index 37da5b3dc0..c6026dc79d 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/client.rpt @@ -46,19 +46,6 @@ read 35 # size 9s "localhost" # host 9092 # port -write close -read abort - -read notify ROUTED_CLUSTER_SERVER - -connect await ROUTED_CLUSTER_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 32s # describe configs 0s # v0 @@ -143,7 +130,7 @@ write 119 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata -read 126 # size +read 128 # size (int:newRequestId) 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt index 3177ca86cd..0e0d7f4050 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander.unknown.member.id/server.rpt @@ -42,13 +42,6 @@ write 35 # size 9s "localhost" # host 9092 # port -read closed -write aborted - -accepted - -connected - read 82 # size 32s # describe configs 0s # v0 @@ -128,7 +121,7 @@ read 119 # size 14 # metadata size [0..14] # metadata -write 126 # size +write 128 # size ${newRequestId} 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt index bef50aaa64..89b17f863c 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/client.rpt @@ -46,19 +46,6 @@ read 35 # size 9s "localhost" # host 9092 # port -write close -read abort - -read notify ROUTED_CLUSTER_SERVER - -connect await ROUTED_CLUSTER_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 32s # describe configs 0s # v0 @@ -143,7 +130,7 @@ write 129 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata -read 126 # size +read 128 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -211,7 +198,7 @@ write 129 # size ${kafka:randomBytes(14)} # metadata -read 198 # size +read 202 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -274,5 +261,3 @@ read 70 # size 10s "memberId-1" # consumer member group id 42s 
[0..42] # group instance id -write close -read abort diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt index f37715afb3..efa653a695 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.highlander/server.rpt @@ -42,13 +42,6 @@ write 35 # size 9s "localhost" # host 9092 # port -read closed -write aborted - -accepted - -connected - read 82 # size 32s # describe configs 0s # v0 @@ -128,7 +121,7 @@ read 129 # size 14 # metadata size [0..14] # metadata -write 126 # size +write 128 # size ${newRequestId} 0 # throttle time 0s # no error @@ -196,7 +189,7 @@ read 129 # size [0..14] # metadata -write 198 # size +write 202 # size ${newRequestId} 0 # throttle time 0s # no error @@ -259,5 +252,3 @@ write 70 # size 10s "memberId-1" # consumer member group id 42s ${instanceId} # group instance id -read closed -write aborted diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt index f5fe2b5722..0c69f2053f 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/client.rpt @@ -46,18 +46,6 @@ read 35 # size 9s "localhost" # host 9092 #port -write close -read abort - -read notify ROUTED_CLUSTER_SERVER - -connect await ROUTED_CLUSTER_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected write 82 # size 32s # describe configs @@ -118,7 +106,7 @@ write 116 # size ${kafka:randomBytes(14)} # metadata -read 123 # size +read 125 # size (int:newRequestId) 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt index d8b52c2c49..ca7dadf2fb 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.protocol.unknown/server.rpt @@ -42,12 +42,6 @@ write 35 # size 9s "localhost" # host 9092 # port -read closed -write aborted - -accepted - -connected read 82 # size 32s # describe configs @@ -101,7 +95,7 @@ read 116 # size 14 # metadata size [0..14] # metadata -write 123 # size +write 125 # size ${newRequestId} 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt index 
2aaa64152a..be91dbfc41 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/client.rpt @@ -46,19 +46,6 @@ read 35 # size 9s "localhost" # host 9092 # port -write close -read abort - -read notify ROUTED_CLUSTER_SERVER - -connect await ROUTED_CLUSTER_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected - write 82 # size 32s # describe configs 0s # v0 @@ -117,7 +104,7 @@ write 119 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata -read 126 # size +read 128 # size (int:newRequestId) 0 # throttle time 0s # no error @@ -169,7 +156,7 @@ write 129 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata -read 126 # size +read 128 # size (int:newRequestId) 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt index f4e31e22f9..9c62766536 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.f1.j5.s3.l3.h3/rebalance.sync.group/server.rpt @@ -42,13 +42,6 @@ write 35 # size 9s "localhost" # host 9092 # port -read closed -write aborted - -accepted - -connected - read 82 # size 32s # describe configs 0s # v0 @@ -101,7 +94,7 @@ read 119 # size 14 # metadata size [0..14] # metadata -write 126 # size +write 128 
# size ${newRequestId} 0 # throttle time 0s # no error @@ -153,7 +146,7 @@ read 129 # size 14 # metadata size [0..14] # metadata -write 126 # size +write 128 # size ${newRequestId} 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt index 8a1a9cbec0..61af07c325 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt +++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/client.rpt @@ -75,18 +75,6 @@ read 35 # size 9s "localhost" #host 9092 #port -write close -read abort - -read notify ROUTED_CLUSTER_SERVER - -connect await ROUTED_CLUSTER_SERVER - "zilla://streams/net0" - option zilla:window ${networkConnectWindow} - option zilla:transmission "duplex" - option zilla:byteorder "network" - -connected write 17 # size 17s # sasl.handshake @@ -204,7 +192,7 @@ write 119 # size 14 # metadata size ${kafka:randomBytes(14)} # metadata -read 126 # size +read 128 # size (int:newRequestId) 0 # throttle time 0s # no error diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt index 77e185fbff..28a7a331d0 100644 --- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt +++ 
b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/group.sasl.f1.j5.s3.l3.h3.handshake.v1/leader/server.rpt @@ -71,12 +71,6 @@ write 35 # size 9s "localhost" #host 9092 #port -read closed -write aborted - -accepted - -connected read 17 # size 17s # sasl.handshake @@ -190,7 +184,7 @@ read 119 # size [0..14] # metadata -write 126 # size +write 128 # size ${newRequestId} 0 # throttle time 0s # no error From 44e6f8a738dbd2695074bf3019d9dc36613bcfb2 Mon Sep 17 00:00:00 2001 From: John Fallows Date: Sat, 23 Sep 2023 16:22:46 -0700 Subject: [PATCH 115/115] Prepare release 0.9.52 --- CHANGELOG.md | 110 +++++++++++++++++++++ build/flyweight-maven-plugin/pom.xml | 2 +- build/pom.xml | 2 +- cloud/docker-image/pom.xml | 2 +- cloud/helm-chart/pom.xml | 2 +- cloud/pom.xml | 2 +- conf/pom.xml | 2 +- incubator/binding-amqp.spec/pom.xml | 2 +- incubator/binding-amqp/pom.xml | 2 +- incubator/command-config/pom.xml | 2 +- incubator/command-dump/pom.xml | 2 +- incubator/command-log/pom.xml | 2 +- incubator/command-tune/pom.xml | 2 +- incubator/exporter-otlp.spec/pom.xml | 2 +- incubator/exporter-otlp/pom.xml | 2 +- incubator/pom.xml | 2 +- manager/pom.xml | 2 +- pom.xml | 2 +- runtime/binding-echo/pom.xml | 2 +- runtime/binding-fan/pom.xml | 2 +- runtime/binding-filesystem/pom.xml | 2 +- runtime/binding-grpc-kafka/pom.xml | 2 +- runtime/binding-grpc/pom.xml | 2 +- runtime/binding-http-filesystem/pom.xml | 2 +- runtime/binding-http-kafka/pom.xml | 2 +- runtime/binding-http/pom.xml | 2 +- runtime/binding-kafka-grpc/pom.xml | 2 +- runtime/binding-kafka/pom.xml | 2 +- runtime/binding-mqtt-kafka/pom.xml | 2 +- runtime/binding-mqtt/pom.xml | 2 +- runtime/binding-proxy/pom.xml | 2 +- runtime/binding-sse-kafka/pom.xml | 2 +- runtime/binding-sse/pom.xml | 2 +- runtime/binding-tcp/pom.xml | 2 +- runtime/binding-tls/pom.xml | 2 +- runtime/binding-ws/pom.xml | 2 +- runtime/command-metrics/pom.xml | 2 +- runtime/command-start/pom.xml | 2 +- 
runtime/command-stop/pom.xml | 2 +- runtime/command/pom.xml | 2 +- runtime/engine/pom.xml | 2 +- runtime/exporter-prometheus/pom.xml | 2 +- runtime/guard-jwt/pom.xml | 2 +- runtime/metrics-grpc/pom.xml | 2 +- runtime/metrics-http/pom.xml | 2 +- runtime/metrics-stream/pom.xml | 2 +- runtime/pom.xml | 2 +- runtime/vault-filesystem/pom.xml | 2 +- specs/binding-echo.spec/pom.xml | 2 +- specs/binding-fan.spec/pom.xml | 2 +- specs/binding-filesystem.spec/pom.xml | 2 +- specs/binding-grpc-kafka.spec/pom.xml | 2 +- specs/binding-grpc.spec/pom.xml | 2 +- specs/binding-http-filesystem.spec/pom.xml | 2 +- specs/binding-http-kafka.spec/pom.xml | 2 +- specs/binding-http.spec/pom.xml | 2 +- specs/binding-kafka-grpc.spec/pom.xml | 2 +- specs/binding-kafka.spec/pom.xml | 2 +- specs/binding-mqtt-kafka.spec/pom.xml | 2 +- specs/binding-mqtt.spec/pom.xml | 2 +- specs/binding-proxy.spec/pom.xml | 2 +- specs/binding-sse-kafka.spec/pom.xml | 2 +- specs/binding-sse.spec/pom.xml | 2 +- specs/binding-tcp.spec/pom.xml | 2 +- specs/binding-tls.spec/pom.xml | 2 +- specs/binding-ws.spec/pom.xml | 2 +- specs/engine.spec/pom.xml | 2 +- specs/exporter-prometheus.spec/pom.xml | 2 +- specs/guard-jwt.spec/pom.xml | 2 +- specs/metrics-grpc.spec/pom.xml | 2 +- specs/metrics-http.spec/pom.xml | 2 +- specs/metrics-stream.spec/pom.xml | 2 +- specs/pom.xml | 2 +- specs/vault-filesystem.spec/pom.xml | 2 +- 74 files changed, 183 insertions(+), 73 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 35cd8d74de..e0f3fa5c3c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,11 +4,121 @@ [Full Changelog](https://github.com/aklivity/zilla/compare/0.9.51...HEAD) +**Implemented enhancements:** + +- Connection pool for `kafka` binding `heartbeat` requests [\#462](https://github.com/aklivity/zilla/issues/462) +- Enhance `mqtt` binding configuration syntax [\#420](https://github.com/aklivity/zilla/issues/420) +- Mqtt-Kafka session implementation [\#319](https://github.com/aklivity/zilla/issues/319) +- Design 
`zilla.yaml` configuration syntax for schema types [\#310](https://github.com/aklivity/zilla/issues/310) +- Generate `zilla.yaml` from `AsyncAPI` specification [\#256](https://github.com/aklivity/zilla/issues/256) +- Generate `zilla.yaml` from `OpenAPI` specification\(s\) [\#254](https://github.com/aklivity/zilla/issues/254) +- Support `kafka` consumer groups [\#215](https://github.com/aklivity/zilla/issues/215) +- MQTT guard implementation [\#307](https://github.com/aklivity/zilla/pull/307) ([bmaidics](https://github.com/bmaidics)) + +**Fixed bugs:** + +- Zilla crash during attempted WebSocket connection [\#391](https://github.com/aklivity/zilla/issues/391) +- Index out of bounds exception with HTTP-Kafka proxy [\#293](https://github.com/aklivity/zilla/issues/293) + **Closed issues:** +- Send will message as data frame + reject large packets [\#364](https://github.com/aklivity/zilla/issues/364) +- Support Kafka client request-response with MQTT clients [\#326](https://github.com/aklivity/zilla/issues/326) - Add guard support for MQTT binding [\#308](https://github.com/aklivity/zilla/issues/308) - Implement retained feature for mqtt-kafka [\#289](https://github.com/aklivity/zilla/issues/289) +**Merged pull requests:** + +- Mqtt client publish fix [\#464](https://github.com/aklivity/zilla/pull/464) ([bmaidics](https://github.com/bmaidics)) +- Fix implicit subscribe no packetId reconnection [\#451](https://github.com/aklivity/zilla/pull/451) ([bmaidics](https://github.com/bmaidics)) +- Remove clientId from subscribeKey [\#450](https://github.com/aklivity/zilla/pull/450) ([bmaidics](https://github.com/bmaidics)) +- Rename config command to generate [\#449](https://github.com/aklivity/zilla/pull/449) ([attilakreiner](https://github.com/attilakreiner)) +- Do not include generated subcsriptionId [\#448](https://github.com/aklivity/zilla/pull/448) ([bmaidics](https://github.com/bmaidics)) +- Adjust engine backoff strategy configuration 
[\#446](https://github.com/aklivity/zilla/pull/446) ([jfallows](https://github.com/jfallows)) +- Don't close group stream on cluster and describe streams closer [\#444](https://github.com/aklivity/zilla/pull/444) ([akrambek](https://github.com/akrambek)) +- Engine configuration worker capacity [\#443](https://github.com/aklivity/zilla/pull/443) ([jfallows](https://github.com/jfallows)) +- Remove unused engine configuration [\#442](https://github.com/aklivity/zilla/pull/442) ([jfallows](https://github.com/jfallows)) +- Ensure socket channel has finished connecting before attempting to read [\#441](https://github.com/aklivity/zilla/pull/441) ([jfallows](https://github.com/jfallows)) +- Mqtt subscription handling bugfix [\#439](https://github.com/aklivity/zilla/pull/439) ([bmaidics](https://github.com/bmaidics)) +- Connection pool for kafka group client [\#438](https://github.com/aklivity/zilla/pull/438) ([akrambek](https://github.com/akrambek)) +- Add affinity to mqtt server and client binding [\#436](https://github.com/aklivity/zilla/pull/436) ([bmaidics](https://github.com/bmaidics)) +- Set init flag for data fragmentation in grpc [\#431](https://github.com/aklivity/zilla/pull/431) ([akrambek](https://github.com/akrambek)) +- Fix flow control issue in kafka-grpc [\#430](https://github.com/aklivity/zilla/pull/430) ([akrambek](https://github.com/akrambek)) +- Fix known issues in group client [\#428](https://github.com/aklivity/zilla/pull/428) ([akrambek](https://github.com/akrambek)) +- Enhance mqtt binding configuration syntax [\#425](https://github.com/aklivity/zilla/pull/425) ([bmaidics](https://github.com/bmaidics)) +- Buffer fragmented kafka session signal messages [\#424](https://github.com/aklivity/zilla/pull/424) ([bmaidics](https://github.com/bmaidics)) +- Fix flow control bug [\#423](https://github.com/aklivity/zilla/pull/423) ([akrambek](https://github.com/akrambek)) +- Serverref change [\#422](https://github.com/aklivity/zilla/pull/422) 
([bmaidics](https://github.com/bmaidics)) +- Fix finding next partition id [\#419](https://github.com/aklivity/zilla/pull/419) ([akrambek](https://github.com/akrambek)) +- Don't end subscribe stream when unsubscribe, no subscription [\#418](https://github.com/aklivity/zilla/pull/418) ([bmaidics](https://github.com/bmaidics)) +- Remove default kafka topic names [\#416](https://github.com/aklivity/zilla/pull/416) ([bmaidics](https://github.com/bmaidics)) +- Fix consumer assignment causing decoding issue [\#414](https://github.com/aklivity/zilla/pull/414) ([akrambek](https://github.com/akrambek)) +- Add test to validate merge produce rejection on wrong partition [\#410](https://github.com/aklivity/zilla/pull/410) ([akrambek](https://github.com/akrambek)) +- Consumer related bug fixes [\#405](https://github.com/aklivity/zilla/pull/405) ([akrambek](https://github.com/akrambek)) +- Remove unused extends OptionsConfig from non-options config classes [\#403](https://github.com/aklivity/zilla/pull/403) ([jfallows](https://github.com/jfallows)) +- Support consumer protocol [\#400](https://github.com/aklivity/zilla/pull/400) ([akrambek](https://github.com/akrambek)) +- Mqtt client implementation [\#398](https://github.com/aklivity/zilla/pull/398) ([bmaidics](https://github.com/bmaidics)) +- Support build after local docker zpm install [\#396](https://github.com/aklivity/zilla/pull/396) ([jfallows](https://github.com/jfallows)) +- Adapt to consumer group changes [\#394](https://github.com/aklivity/zilla/pull/394) ([bmaidics](https://github.com/bmaidics)) +- Bump actions/checkout from 3 to 4 [\#393](https://github.com/aklivity/zilla/pull/393) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Merged consumer group support [\#390](https://github.com/aklivity/zilla/pull/390) ([akrambek](https://github.com/akrambek)) +- Session expiry [\#387](https://github.com/aklivity/zilla/pull/387) ([bmaidics](https://github.com/bmaidics)) +- Request data length is non-negative 
[\#386](https://github.com/aklivity/zilla/pull/386) ([jfallows](https://github.com/jfallows)) +- Fix mqtt-kafka publish bug [\#383](https://github.com/aklivity/zilla/pull/383) ([bmaidics](https://github.com/bmaidics)) +- Support configuration property definitions for custom type... [\#382](https://github.com/aklivity/zilla/pull/382) ([jfallows](https://github.com/jfallows)) +- Mqtt kafka redirect [\#381](https://github.com/aklivity/zilla/pull/381) ([bmaidics](https://github.com/bmaidics)) +- Bump org.apache.ivy:ivy from 2.5.1 to 2.5.2 in /manager [\#377](https://github.com/aklivity/zilla/pull/377) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Generate zilla.yaml for asyncapi.mqtt.proxy from an AsyncAPI definition [\#375](https://github.com/aklivity/zilla/pull/375) ([attilakreiner](https://github.com/attilakreiner)) +- Review budget debitors [\#374](https://github.com/aklivity/zilla/pull/374) ([jfallows](https://github.com/jfallows)) +- Support binding config builder exit [\#373](https://github.com/aklivity/zilla/pull/373) ([jfallows](https://github.com/jfallows)) +- Support config builder for MQTT config [\#372](https://github.com/aklivity/zilla/pull/372) ([jfallows](https://github.com/jfallows)) +- Bump org.codehaus.mojo:exec-maven-plugin from 1.6.0 to 3.1.0 [\#370](https://github.com/aklivity/zilla/pull/370) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Generate zilla.yaml from an AsyncAPI definition [\#369](https://github.com/aklivity/zilla/pull/369) ([attilakreiner](https://github.com/attilakreiner)) +- Mqtt kafka will message delivery [\#367](https://github.com/aklivity/zilla/pull/367) ([bmaidics](https://github.com/bmaidics)) +- Bump org.apache.maven.plugins:maven-plugin-plugin from 3.5 to 3.9.0 [\#366](https://github.com/aklivity/zilla/pull/366) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump junit:junit from 4.13.1 to 4.13.2 [\#365](https://github.com/aklivity/zilla/pull/365) 
([dependabot[bot]](https://github.com/apps/dependabot)) +- Send will message as data frame + reject large packets [\#363](https://github.com/aklivity/zilla/pull/363) ([bmaidics](https://github.com/bmaidics)) +- Sanitize zip entry path [\#362](https://github.com/aklivity/zilla/pull/362) ([jfallows](https://github.com/jfallows)) +- Bump org.apache.maven:maven-core from 3.6.0 to 3.8.1 [\#361](https://github.com/aklivity/zilla/pull/361) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Merge consumer group metadata [\#359](https://github.com/aklivity/zilla/pull/359) ([akrambek](https://github.com/akrambek)) +- Support dynamic behavior injection in config builder fluent API [\#358](https://github.com/aklivity/zilla/pull/358) ([jfallows](https://github.com/jfallows)) +- Bump org.apache.maven.plugins:maven-jar-plugin from 3.2.0 to 3.3.0 [\#357](https://github.com/aklivity/zilla/pull/357) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump com.squareup:javapoet from 1.9.0 to 1.13.0 [\#355](https://github.com/aklivity/zilla/pull/355) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Include JDK 20 in build matrix [\#352](https://github.com/aklivity/zilla/pull/352) ([jfallows](https://github.com/jfallows)) +- Ignore CacheFetchIT.shouldFetchFilterSyncWithData [\#351](https://github.com/aklivity/zilla/pull/351) ([attilakreiner](https://github.com/attilakreiner)) +- Metadata for group merged stream [\#349](https://github.com/aklivity/zilla/pull/349) ([akrambek](https://github.com/akrambek)) +- Bump io.fabric8:docker-maven-plugin from 0.39.1 to 0.43.2 [\#348](https://github.com/aklivity/zilla/pull/348) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump org.sonatype.plexus:plexus-sec-dispatcher from 1.3 to 1.4 [\#347](https://github.com/aklivity/zilla/pull/347) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump io.kokuwa.maven:helm-maven-plugin from 6.6.0 to 6.10.0 [\#345](https://github.com/aklivity/zilla/pull/345) 
([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump org.apache.maven.plugin-tools:maven-plugin-annotations from 3.5 to 3.9.0 [\#344](https://github.com/aklivity/zilla/pull/344) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump alpine from 3.18.2 to 3.18.3 in /cloud/docker-image/src/main/docker/release [\#343](https://github.com/aklivity/zilla/pull/343) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump eclipse-temurin from 17-alpine to 20-alpine in /cloud/docker-image/src/main/docker/incubator [\#342](https://github.com/aklivity/zilla/pull/342) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump alpine from 3.18.2 to 3.18.3 in /cloud/docker-image/src/main/docker/incubator [\#341](https://github.com/aklivity/zilla/pull/341) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump eclipse-temurin from 17-alpine to 20-alpine in /cloud/docker-image/src/main/docker/release [\#340](https://github.com/aklivity/zilla/pull/340) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump org.apache.maven.plugins:maven-compiler-plugin from 3.8.0 to 3.11.0 [\#339](https://github.com/aklivity/zilla/pull/339) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump actions/setup-java from 1 to 3 [\#338](https://github.com/aklivity/zilla/pull/338) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump actions/checkout from 2 to 3 [\#337](https://github.com/aklivity/zilla/pull/337) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump actions/cache from 2 to 3 [\#336](https://github.com/aklivity/zilla/pull/336) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump org.moditect:moditect-maven-plugin from 1.0.0.RC1 to 1.0.0.Final [\#335](https://github.com/aklivity/zilla/pull/335) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump com.mycila:license-maven-plugin from 4.1 to 4.2 [\#334](https://github.com/aklivity/zilla/pull/334) 
([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump org.apache.maven.plugins:maven-source-plugin from 3.0.1 to 3.3.0 [\#333](https://github.com/aklivity/zilla/pull/333) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump byteman.version from 4.0.20 to 4.0.21 [\#332](https://github.com/aklivity/zilla/pull/332) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump antlr4.version from 4.11.1 to 4.13.0 [\#331](https://github.com/aklivity/zilla/pull/331) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Config builders [\#330](https://github.com/aklivity/zilla/pull/330) ([jfallows](https://github.com/jfallows)) +- Add hashKey support to merged stream [\#329](https://github.com/aklivity/zilla/pull/329) ([bmaidics](https://github.com/bmaidics)) +- Default group session timeout [\#328](https://github.com/aklivity/zilla/pull/328) ([akrambek](https://github.com/akrambek)) +- Include member count as part of group data ex [\#327](https://github.com/aklivity/zilla/pull/327) ([akrambek](https://github.com/akrambek)) +- Request-response mqtt-kafka [\#325](https://github.com/aklivity/zilla/pull/325) ([bmaidics](https://github.com/bmaidics)) +- Generate zilla.yaml from an OpenAPI definition [\#324](https://github.com/aklivity/zilla/pull/324) ([attilakreiner](https://github.com/attilakreiner)) +- Support zilla.yaml config reader and writer [\#323](https://github.com/aklivity/zilla/pull/323) ([jfallows](https://github.com/jfallows)) +- Ignore heartbeat if the handshake request hasn't completed yet [\#322](https://github.com/aklivity/zilla/pull/322) ([akrambek](https://github.com/akrambek)) +- Support local zpmw install [\#321](https://github.com/aklivity/zilla/pull/321) ([jfallows](https://github.com/jfallows)) +- Mqtt kafka sessions [\#318](https://github.com/aklivity/zilla/pull/318) ([bmaidics](https://github.com/bmaidics)) +- Mqtt kafka options [\#304](https://github.com/aklivity/zilla/pull/304) ([bmaidics](https://github.com/bmaidics)) 
+- Redirect on mqtt reset using server reference [\#303](https://github.com/aklivity/zilla/pull/303) ([bmaidics](https://github.com/bmaidics)) +- Mqtt retained feature [\#290](https://github.com/aklivity/zilla/pull/290) ([bmaidics](https://github.com/bmaidics)) +- Support Kafka consumer groups [\#262](https://github.com/aklivity/zilla/pull/262) ([akrambek](https://github.com/akrambek)) + ## [0.9.51](https://github.com/aklivity/zilla/tree/0.9.51) (2023-07-27) [Full Changelog](https://github.com/aklivity/zilla/compare/0.9.50...0.9.51) diff --git a/build/flyweight-maven-plugin/pom.xml b/build/flyweight-maven-plugin/pom.xml index 74d6a13033..5b1b1c4b68 100644 --- a/build/flyweight-maven-plugin/pom.xml +++ b/build/flyweight-maven-plugin/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla build - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/build/pom.xml b/build/pom.xml index 99a8e7a8fa..59ebf8517e 100644 --- a/build/pom.xml +++ b/build/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/cloud/docker-image/pom.xml b/cloud/docker-image/pom.xml index 9f84442be4..ee84364032 100644 --- a/cloud/docker-image/pom.xml +++ b/cloud/docker-image/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla cloud - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/cloud/helm-chart/pom.xml b/cloud/helm-chart/pom.xml index 853e06803a..e565712d38 100644 --- a/cloud/helm-chart/pom.xml +++ b/cloud/helm-chart/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla cloud - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/cloud/pom.xml b/cloud/pom.xml index 68c51f05c8..a704fed85b 100644 --- a/cloud/pom.xml +++ b/cloud/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/conf/pom.xml b/conf/pom.xml index 86622b6574..2f3f35627d 100644 --- a/conf/pom.xml +++ b/conf/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/incubator/binding-amqp.spec/pom.xml b/incubator/binding-amqp.spec/pom.xml 
index 40846d1a35..a84af1ea87 100644 --- a/incubator/binding-amqp.spec/pom.xml +++ b/incubator/binding-amqp.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/incubator/binding-amqp/pom.xml b/incubator/binding-amqp/pom.xml index 71c9c61d90..2a2ef7cc05 100644 --- a/incubator/binding-amqp/pom.xml +++ b/incubator/binding-amqp/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/incubator/command-config/pom.xml b/incubator/command-config/pom.xml index 4d8ec3933d..cc6f121890 100644 --- a/incubator/command-config/pom.xml +++ b/incubator/command-config/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/incubator/command-dump/pom.xml b/incubator/command-dump/pom.xml index 1a22886e53..ef5f6816d3 100644 --- a/incubator/command-dump/pom.xml +++ b/incubator/command-dump/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/incubator/command-log/pom.xml b/incubator/command-log/pom.xml index e90a647124..1858cdde56 100644 --- a/incubator/command-log/pom.xml +++ b/incubator/command-log/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/incubator/command-tune/pom.xml b/incubator/command-tune/pom.xml index 7374c89ed9..bef4dcbf15 100644 --- a/incubator/command-tune/pom.xml +++ b/incubator/command-tune/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/incubator/exporter-otlp.spec/pom.xml b/incubator/exporter-otlp.spec/pom.xml index c23892479f..e3cf97248c 100644 --- a/incubator/exporter-otlp.spec/pom.xml +++ b/incubator/exporter-otlp.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/incubator/exporter-otlp/pom.xml b/incubator/exporter-otlp/pom.xml index 48b162cc1b..828ae7928a 100644 --- 
a/incubator/exporter-otlp/pom.xml +++ b/incubator/exporter-otlp/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla incubator - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/incubator/pom.xml b/incubator/pom.xml index 4ea877d03a..d9e48c1a84 100644 --- a/incubator/pom.xml +++ b/incubator/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/manager/pom.xml b/manager/pom.xml index 11bc4d07ab..77d4e0d970 100644 --- a/manager/pom.xml +++ b/manager/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/pom.xml b/pom.xml index 8374dc08cc..1180678685 100644 --- a/pom.xml +++ b/pom.xml @@ -7,7 +7,7 @@ 4.0.0 io.aklivity.zilla zilla - develop-SNAPSHOT + 0.9.52 pom zilla https://github.com/aklivity/zilla diff --git a/runtime/binding-echo/pom.xml b/runtime/binding-echo/pom.xml index 37bc15776e..14f45bf7f5 100644 --- a/runtime/binding-echo/pom.xml +++ b/runtime/binding-echo/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-fan/pom.xml b/runtime/binding-fan/pom.xml index 77b5e0aab5..3ac2ef49e6 100644 --- a/runtime/binding-fan/pom.xml +++ b/runtime/binding-fan/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-filesystem/pom.xml b/runtime/binding-filesystem/pom.xml index 19dead7771..ce7d5bcae5 100644 --- a/runtime/binding-filesystem/pom.xml +++ b/runtime/binding-filesystem/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-grpc-kafka/pom.xml b/runtime/binding-grpc-kafka/pom.xml index a9952b0a68..ce78aae894 100644 --- a/runtime/binding-grpc-kafka/pom.xml +++ b/runtime/binding-grpc-kafka/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-grpc/pom.xml b/runtime/binding-grpc/pom.xml index 8bf0b5f396..7bdf8c951c 100644 --- 
a/runtime/binding-grpc/pom.xml +++ b/runtime/binding-grpc/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-http-filesystem/pom.xml b/runtime/binding-http-filesystem/pom.xml index 19d46db62e..2951509679 100644 --- a/runtime/binding-http-filesystem/pom.xml +++ b/runtime/binding-http-filesystem/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-http-kafka/pom.xml b/runtime/binding-http-kafka/pom.xml index dabc54fb58..ca7d696db5 100644 --- a/runtime/binding-http-kafka/pom.xml +++ b/runtime/binding-http-kafka/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-http/pom.xml b/runtime/binding-http/pom.xml index 15111be380..31fed0aee3 100644 --- a/runtime/binding-http/pom.xml +++ b/runtime/binding-http/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-kafka-grpc/pom.xml b/runtime/binding-kafka-grpc/pom.xml index 41b94c3bcb..e33d6edcae 100644 --- a/runtime/binding-kafka-grpc/pom.xml +++ b/runtime/binding-kafka-grpc/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-kafka/pom.xml b/runtime/binding-kafka/pom.xml index c12e6cc57d..e6911fcf34 100644 --- a/runtime/binding-kafka/pom.xml +++ b/runtime/binding-kafka/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-mqtt-kafka/pom.xml b/runtime/binding-mqtt-kafka/pom.xml index 4c9910f84c..7a383c0847 100644 --- a/runtime/binding-mqtt-kafka/pom.xml +++ b/runtime/binding-mqtt-kafka/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-mqtt/pom.xml b/runtime/binding-mqtt/pom.xml index b349482243..647f5e7508 100644 --- a/runtime/binding-mqtt/pom.xml +++ 
b/runtime/binding-mqtt/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-proxy/pom.xml b/runtime/binding-proxy/pom.xml index 38985b6961..1ae8b00a03 100644 --- a/runtime/binding-proxy/pom.xml +++ b/runtime/binding-proxy/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-sse-kafka/pom.xml b/runtime/binding-sse-kafka/pom.xml index 73486efd4d..379a403f1b 100644 --- a/runtime/binding-sse-kafka/pom.xml +++ b/runtime/binding-sse-kafka/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-sse/pom.xml b/runtime/binding-sse/pom.xml index 1d7aa891b8..0db4d56018 100644 --- a/runtime/binding-sse/pom.xml +++ b/runtime/binding-sse/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-tcp/pom.xml b/runtime/binding-tcp/pom.xml index 7df01d00f5..8f8c51f266 100644 --- a/runtime/binding-tcp/pom.xml +++ b/runtime/binding-tcp/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-tls/pom.xml b/runtime/binding-tls/pom.xml index 6b6779e140..decdebf311 100644 --- a/runtime/binding-tls/pom.xml +++ b/runtime/binding-tls/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/binding-ws/pom.xml b/runtime/binding-ws/pom.xml index 6f846d1a83..1ca326fb8e 100644 --- a/runtime/binding-ws/pom.xml +++ b/runtime/binding-ws/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/command-metrics/pom.xml b/runtime/command-metrics/pom.xml index ad52828b76..cd972027f1 100644 --- a/runtime/command-metrics/pom.xml +++ b/runtime/command-metrics/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/command-start/pom.xml 
b/runtime/command-start/pom.xml index 8ab765d93d..88b6fad84b 100644 --- a/runtime/command-start/pom.xml +++ b/runtime/command-start/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/command-stop/pom.xml b/runtime/command-stop/pom.xml index 9334bc51e1..359ac57da4 100644 --- a/runtime/command-stop/pom.xml +++ b/runtime/command-stop/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/command/pom.xml b/runtime/command/pom.xml index 6b4253202a..ca31d008d9 100644 --- a/runtime/command/pom.xml +++ b/runtime/command/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/engine/pom.xml b/runtime/engine/pom.xml index 4b7ca88b6f..9a37064f60 100644 --- a/runtime/engine/pom.xml +++ b/runtime/engine/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/exporter-prometheus/pom.xml b/runtime/exporter-prometheus/pom.xml index dad973cdb8..0959fd136b 100644 --- a/runtime/exporter-prometheus/pom.xml +++ b/runtime/exporter-prometheus/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/guard-jwt/pom.xml b/runtime/guard-jwt/pom.xml index 512ee9f932..fe5518c2ac 100644 --- a/runtime/guard-jwt/pom.xml +++ b/runtime/guard-jwt/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/metrics-grpc/pom.xml b/runtime/metrics-grpc/pom.xml index 5713263b66..0e3bae4fa6 100644 --- a/runtime/metrics-grpc/pom.xml +++ b/runtime/metrics-grpc/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/metrics-http/pom.xml b/runtime/metrics-http/pom.xml index f505adfd82..d35fa39af7 100644 --- a/runtime/metrics-http/pom.xml +++ b/runtime/metrics-http/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 
0.9.52 ../pom.xml diff --git a/runtime/metrics-stream/pom.xml b/runtime/metrics-stream/pom.xml index 9ccb8e23dd..f263d4014c 100644 --- a/runtime/metrics-stream/pom.xml +++ b/runtime/metrics-stream/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/pom.xml b/runtime/pom.xml index f56e111660..47920a8e6c 100644 --- a/runtime/pom.xml +++ b/runtime/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/runtime/vault-filesystem/pom.xml b/runtime/vault-filesystem/pom.xml index d5ab82cb01..f521404d5d 100644 --- a/runtime/vault-filesystem/pom.xml +++ b/runtime/vault-filesystem/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla runtime - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-echo.spec/pom.xml b/specs/binding-echo.spec/pom.xml index d0bfed636e..267325672f 100644 --- a/specs/binding-echo.spec/pom.xml +++ b/specs/binding-echo.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-fan.spec/pom.xml b/specs/binding-fan.spec/pom.xml index 027d3210d7..66184d02f9 100644 --- a/specs/binding-fan.spec/pom.xml +++ b/specs/binding-fan.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-filesystem.spec/pom.xml b/specs/binding-filesystem.spec/pom.xml index cc20c4134b..89aec467f2 100644 --- a/specs/binding-filesystem.spec/pom.xml +++ b/specs/binding-filesystem.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-grpc-kafka.spec/pom.xml b/specs/binding-grpc-kafka.spec/pom.xml index 546b34371a..a673e26c5d 100644 --- a/specs/binding-grpc-kafka.spec/pom.xml +++ b/specs/binding-grpc-kafka.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-grpc.spec/pom.xml b/specs/binding-grpc.spec/pom.xml index c75c35e62e..e0d1c00510 
100644 --- a/specs/binding-grpc.spec/pom.xml +++ b/specs/binding-grpc.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-http-filesystem.spec/pom.xml b/specs/binding-http-filesystem.spec/pom.xml index b9499ae553..45cfd0eb3d 100644 --- a/specs/binding-http-filesystem.spec/pom.xml +++ b/specs/binding-http-filesystem.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-http-kafka.spec/pom.xml b/specs/binding-http-kafka.spec/pom.xml index 456a3106b5..aa10040a0e 100644 --- a/specs/binding-http-kafka.spec/pom.xml +++ b/specs/binding-http-kafka.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-http.spec/pom.xml b/specs/binding-http.spec/pom.xml index 38a7898be9..51d7c72457 100644 --- a/specs/binding-http.spec/pom.xml +++ b/specs/binding-http.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-kafka-grpc.spec/pom.xml b/specs/binding-kafka-grpc.spec/pom.xml index 197265531f..7398f03305 100644 --- a/specs/binding-kafka-grpc.spec/pom.xml +++ b/specs/binding-kafka-grpc.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-kafka.spec/pom.xml b/specs/binding-kafka.spec/pom.xml index ab11b498f2..7b8d82ad62 100644 --- a/specs/binding-kafka.spec/pom.xml +++ b/specs/binding-kafka.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-mqtt-kafka.spec/pom.xml b/specs/binding-mqtt-kafka.spec/pom.xml index f19bfcfb9d..dbb6a9ef2b 100644 --- a/specs/binding-mqtt-kafka.spec/pom.xml +++ b/specs/binding-mqtt-kafka.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-mqtt.spec/pom.xml b/specs/binding-mqtt.spec/pom.xml index 
f624bb85ae..66a8f87601 100644 --- a/specs/binding-mqtt.spec/pom.xml +++ b/specs/binding-mqtt.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-proxy.spec/pom.xml b/specs/binding-proxy.spec/pom.xml index fcdb4b9723..73fdd2eefb 100644 --- a/specs/binding-proxy.spec/pom.xml +++ b/specs/binding-proxy.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-sse-kafka.spec/pom.xml b/specs/binding-sse-kafka.spec/pom.xml index 20b099b140..c6a1fdb188 100644 --- a/specs/binding-sse-kafka.spec/pom.xml +++ b/specs/binding-sse-kafka.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-sse.spec/pom.xml b/specs/binding-sse.spec/pom.xml index 5cf8e9018c..9ff1407e61 100644 --- a/specs/binding-sse.spec/pom.xml +++ b/specs/binding-sse.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-tcp.spec/pom.xml b/specs/binding-tcp.spec/pom.xml index 3a64fc98ba..a4c49db30e 100644 --- a/specs/binding-tcp.spec/pom.xml +++ b/specs/binding-tcp.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-tls.spec/pom.xml b/specs/binding-tls.spec/pom.xml index dd0ca5341e..a7b4002a9c 100644 --- a/specs/binding-tls.spec/pom.xml +++ b/specs/binding-tls.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/binding-ws.spec/pom.xml b/specs/binding-ws.spec/pom.xml index d7f9373844..81c9adfdbd 100644 --- a/specs/binding-ws.spec/pom.xml +++ b/specs/binding-ws.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/engine.spec/pom.xml b/specs/engine.spec/pom.xml index 1d5bd3dafe..9708996de0 100644 --- a/specs/engine.spec/pom.xml +++ b/specs/engine.spec/pom.xml @@ -8,7 +8,7 @@ 
io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/exporter-prometheus.spec/pom.xml b/specs/exporter-prometheus.spec/pom.xml index ab815f1981..cac9e24c38 100644 --- a/specs/exporter-prometheus.spec/pom.xml +++ b/specs/exporter-prometheus.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/guard-jwt.spec/pom.xml b/specs/guard-jwt.spec/pom.xml index ebe02ac328..671f68e2da 100644 --- a/specs/guard-jwt.spec/pom.xml +++ b/specs/guard-jwt.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/metrics-grpc.spec/pom.xml b/specs/metrics-grpc.spec/pom.xml index 884c0b1751..ab56e274cf 100644 --- a/specs/metrics-grpc.spec/pom.xml +++ b/specs/metrics-grpc.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/metrics-http.spec/pom.xml b/specs/metrics-http.spec/pom.xml index 7d6afa3aaa..d54693f8d4 100644 --- a/specs/metrics-http.spec/pom.xml +++ b/specs/metrics-http.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/metrics-stream.spec/pom.xml b/specs/metrics-stream.spec/pom.xml index 5d51e42f16..e5f70cb5da 100644 --- a/specs/metrics-stream.spec/pom.xml +++ b/specs/metrics-stream.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/pom.xml b/specs/pom.xml index 5979459274..54f3482cc4 100644 --- a/specs/pom.xml +++ b/specs/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla zilla - develop-SNAPSHOT + 0.9.52 ../pom.xml diff --git a/specs/vault-filesystem.spec/pom.xml b/specs/vault-filesystem.spec/pom.xml index b248001b27..943d8b6e7c 100644 --- a/specs/vault-filesystem.spec/pom.xml +++ b/specs/vault-filesystem.spec/pom.xml @@ -8,7 +8,7 @@ io.aklivity.zilla specs - develop-SNAPSHOT + 0.9.52 ../pom.xml