diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index c310bc9e57..c85e92ec20 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -34,7 +34,7 @@ jobs:
- name: Build with Maven
run: ./mvnw -B -U -nsu -Ddocker.logStdout -Dfailsafe.skipAfterFailureCount=1 -Ddocker.verbose install jacoco:report-aggregate
- name: Conditional Artifact Upload
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
if: failure()
with:
name: zilla-dump
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index d790178a93..8dc3602146 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -50,7 +50,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
- uses: github/codeql-action/init@v2
+ uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -73,7 +73,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
- uses: github/codeql-action/autobuild@v2
+ uses: github/codeql-action/autobuild@v3
# ℹ️ Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@@ -86,6 +86,6 @@ jobs:
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v2
+ uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 898ca0cb8d..bb06b35580 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,7 +2,41 @@
## [Unreleased](https://github.com/aklivity/zilla/tree/HEAD)
-[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.61...HEAD)
+[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.62...HEAD)
+
+**Implemented enhancements:**
+
+- Support MQTT message expiry in `mqtt-kafka` mapping [\#631](https://github.com/aklivity/zilla/issues/631)
+- Implement mqtt message expiry [\#640](https://github.com/aklivity/zilla/pull/640) ([bmaidics](https://github.com/bmaidics))
+- Improve server sent DISCONNECT reasonCodes [\#634](https://github.com/aklivity/zilla/pull/634) ([bmaidics](https://github.com/bmaidics))
+
+**Fixed bugs:**
+
+- OffsetFetch Request should connect to the coordinator instead of a random member of cluster [\#653](https://github.com/aklivity/zilla/issues/653)
+- Mqtt-kafka will message bugfixes [\#644](https://github.com/aklivity/zilla/pull/644) ([bmaidics](https://github.com/bmaidics))
+
+**Closed issues:**
+
+- gRPC remote\_server gets duplicate messages [\#480](https://github.com/aklivity/zilla/issues/480)
+- Log compaction behavior with or without bootstrap is not consistent [\#389](https://github.com/aklivity/zilla/issues/389)
+
+**Merged pull requests:**
+
+- Fix static field [\#655](https://github.com/aklivity/zilla/pull/655) ([akrambek](https://github.com/akrambek))
+- OffsetFetch Request should connect to the coordinator instead of a random member of cluster [\#654](https://github.com/aklivity/zilla/pull/654) ([akrambek](https://github.com/akrambek))
+- Add grpc extension parsing to the dump command [\#652](https://github.com/aklivity/zilla/pull/652) ([attilakreiner](https://github.com/attilakreiner))
+- Add end-to-end testing for the `dump` command [\#646](https://github.com/aklivity/zilla/pull/646) ([attilakreiner](https://github.com/attilakreiner))
+- Bump actions/upload-artifact from 3 to 4 [\#645](https://github.com/aklivity/zilla/pull/645) ([dependabot[bot]](https://github.com/apps/dependabot))
+- Bump github/codeql-action from 2 to 3 [\#643](https://github.com/aklivity/zilla/pull/643) ([dependabot[bot]](https://github.com/apps/dependabot))
+- Fix `java.util.MissingFormatArgumentException` when using Kafka debugging. [\#639](https://github.com/aklivity/zilla/pull/639) ([voutilad](https://github.com/voutilad))
+- Json schema errors [\#638](https://github.com/aklivity/zilla/pull/638) ([vordimous](https://github.com/vordimous))
+- Add jumbograms and proxy extension parsing to dump command [\#635](https://github.com/aklivity/zilla/pull/635) ([attilakreiner](https://github.com/attilakreiner))
+- Bump ubuntu from jammy-20230916 to jammy-20231128 in /cloud/docker-image/src/main/docker/incubator [\#608](https://github.com/aklivity/zilla/pull/608) ([dependabot[bot]](https://github.com/apps/dependabot))
+- Bump ubuntu from jammy-20230916 to jammy-20231128 in /cloud/docker-image/src/main/docker/release [\#607](https://github.com/aklivity/zilla/pull/607) ([dependabot[bot]](https://github.com/apps/dependabot))
+
+## [0.9.62](https://github.com/aklivity/zilla/tree/0.9.62) (2023-12-13)
+
+[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.61...0.9.62)
**Closed issues:**
diff --git a/build/flyweight-maven-plugin/pom.xml b/build/flyweight-maven-plugin/pom.xml
index 07bc31d5ab..c4ace0a2f8 100644
--- a/build/flyweight-maven-plugin/pom.xml
+++ b/build/flyweight-maven-plugin/pom.xml
@@ -8,7 +8,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>build</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
diff --git a/build/pom.xml b/build/pom.xml
index 1a83751a9d..25bfcb6bc6 100644
--- a/build/pom.xml
+++ b/build/pom.xml
@@ -8,7 +8,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>zilla</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
diff --git a/cloud/docker-image/pom.xml b/cloud/docker-image/pom.xml
index dfa107a56e..45c763586a 100644
--- a/cloud/docker-image/pom.xml
+++ b/cloud/docker-image/pom.xml
@@ -8,7 +8,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>cloud</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
diff --git a/cloud/docker-image/src/main/docker/incubator/Dockerfile b/cloud/docker-image/src/main/docker/incubator/Dockerfile
index ad2dbcdc06..3d25d90186 100644
--- a/cloud/docker-image/src/main/docker/incubator/Dockerfile
+++ b/cloud/docker-image/src/main/docker/incubator/Dockerfile
@@ -27,7 +27,7 @@ RUN cat zpm.json.template | env VERSION=${project.version} envsubst > zpm.json
RUN ./zpmw install --debug --exclude-remote-repositories
RUN ./zpmw clean --keep-image
-FROM ubuntu:jammy-20230916
+FROM ubuntu:jammy-20231128
ENV ZILLA_VERSION ${project.version}
diff --git a/cloud/docker-image/src/main/docker/release/Dockerfile b/cloud/docker-image/src/main/docker/release/Dockerfile
index ad2dbcdc06..3d25d90186 100644
--- a/cloud/docker-image/src/main/docker/release/Dockerfile
+++ b/cloud/docker-image/src/main/docker/release/Dockerfile
@@ -27,7 +27,7 @@ RUN cat zpm.json.template | env VERSION=${project.version} envsubst > zpm.json
RUN ./zpmw install --debug --exclude-remote-repositories
RUN ./zpmw clean --keep-image
-FROM ubuntu:jammy-20230916
+FROM ubuntu:jammy-20231128
ENV ZILLA_VERSION ${project.version}
diff --git a/cloud/helm-chart/pom.xml b/cloud/helm-chart/pom.xml
index 3b452113b0..ea6d606c9b 100644
--- a/cloud/helm-chart/pom.xml
+++ b/cloud/helm-chart/pom.xml
@@ -8,7 +8,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>cloud</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
diff --git a/cloud/pom.xml b/cloud/pom.xml
index fc26e68490..f631997f5e 100644
--- a/cloud/pom.xml
+++ b/cloud/pom.xml
@@ -8,7 +8,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>zilla</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
diff --git a/conf/pom.xml b/conf/pom.xml
index 1f6a5bfe8d..c857d0acef 100644
--- a/conf/pom.xml
+++ b/conf/pom.xml
@@ -8,7 +8,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>zilla</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
diff --git a/incubator/binding-amqp.spec/pom.xml b/incubator/binding-amqp.spec/pom.xml
index 3a62f2dae5..e5e8540189 100644
--- a/incubator/binding-amqp.spec/pom.xml
+++ b/incubator/binding-amqp.spec/pom.xml
@@ -8,7 +8,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
diff --git a/incubator/binding-amqp/pom.xml b/incubator/binding-amqp/pom.xml
index bb53773c1c..0770322188 100644
--- a/incubator/binding-amqp/pom.xml
+++ b/incubator/binding-amqp/pom.xml
@@ -8,7 +8,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
diff --git a/incubator/catalog-inline.spec/pom.xml b/incubator/catalog-inline.spec/pom.xml
index 778927f132..578807fa71 100644
--- a/incubator/catalog-inline.spec/pom.xml
+++ b/incubator/catalog-inline.spec/pom.xml
@@ -8,7 +8,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
diff --git a/incubator/catalog-inline/pom.xml b/incubator/catalog-inline/pom.xml
index 67f7d062bf..46bad00268 100644
--- a/incubator/catalog-inline/pom.xml
+++ b/incubator/catalog-inline/pom.xml
@@ -6,7 +6,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
diff --git a/incubator/catalog-schema-registry.spec/pom.xml b/incubator/catalog-schema-registry.spec/pom.xml
index 8e25996120..4ddded993e 100644
--- a/incubator/catalog-schema-registry.spec/pom.xml
+++ b/incubator/catalog-schema-registry.spec/pom.xml
@@ -8,7 +8,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
diff --git a/incubator/catalog-schema-registry/pom.xml b/incubator/catalog-schema-registry/pom.xml
index 279ccd0905..ad7a1e03e0 100644
--- a/incubator/catalog-schema-registry/pom.xml
+++ b/incubator/catalog-schema-registry/pom.xml
@@ -6,7 +6,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
diff --git a/incubator/command-dump/README.md b/incubator/command-dump/README.md
new file mode 100644
index 0000000000..751d641d9a
--- /dev/null
+++ b/incubator/command-dump/README.md
@@ -0,0 +1,11 @@
+The `dump` command creates a `pcap` file that can be opened in Wireshark using the `zilla.lua` dissector plugin.
+
+`WiresharkIT` is an integration test that verifies the `zilla.lua` dissector by running `tshark` in a Docker container. If the image is not found locally, it is built on the fly, but the test runs faster when the `tshark` image is pre-built.
+
+This is the command to build a multi-arch `tshark` image and push it to a Docker repository:
+
+```bash
+cd /incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline
+docker buildx create --name container --driver=docker-container
+docker buildx build --tag <repository>/tshark:<version> --platform linux/arm64/v8,linux/amd64 --builder container --push .
+```
diff --git a/incubator/command-dump/pom.xml b/incubator/command-dump/pom.xml
index 29cf5ceeca..cb78128383 100644
--- a/incubator/command-dump/pom.xml
+++ b/incubator/command-dump/pom.xml
@@ -8,7 +8,7 @@
 <groupId>io.aklivity.zilla</groupId>
 <artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
 <relativePath>../pom.xml</relativePath>
@@ -49,11 +49,53 @@
 <version>${project.version}</version>
 <scope>provided</scope>
 </dependency>
+ <dependency>
+ <groupId>io.aklivity.zilla</groupId>
+ <artifactId>binding-proxy.spec</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>io.aklivity.zilla</groupId>
+ <artifactId>binding-http.spec</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>io.aklivity.zilla</groupId>
+ <artifactId>binding-grpc.spec</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
 <dependency>
 <groupId>org.junit.jupiter</groupId>
 <artifactId>junit-jupiter-engine</artifactId>
 <scope>test</scope>
 </dependency>
+ <dependency>
+ <groupId>org.testcontainers</groupId>
+ <artifactId>testcontainers</artifactId>
+ <version>1.19.3</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.testcontainers</groupId>
+ <artifactId>junit-jupiter</artifactId>
+ <version>1.19.3</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ <version>1.7.36</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-simple</artifactId>
+ <version>1.7.36</version>
+ <scope>test</scope>
+ </dependency>
@@ -67,8 +109,9 @@
 <artifactId>license-maven-plugin</artifactId>
- <exclude>src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/*</exclude>
- <exclude>src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/engine/*</exclude>
+ <exclude>src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/*.pcap</exclude>
+ <exclude>src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/*.txt</exclude>
+ <exclude>src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/*</exclude>
@@ -91,6 +134,11 @@
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-failsafe-plugin</artifactId>
 </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-resources-plugin</artifactId>
+ <version>3.3.1</version>
+ </plugin>
 <plugin>
 <groupId>org.jacoco</groupId>
 <artifactId>jacoco-maven-plugin</artifactId>
@@ -137,5 +185,14 @@
+ <resources>
+ <resource>
+ <directory>src/main/resources</directory>
+ <filtering>true</filtering>
+ <includes>
+ <include>**/zilla.lua</include>
+ </includes>
+ </resource>
+ </resources>
diff --git a/incubator/command-dump/src/main/java/io/aklivity/zilla/runtime/command/dump/internal/airline/ZillaDumpCommand.java b/incubator/command-dump/src/main/java/io/aklivity/zilla/runtime/command/dump/internal/airline/ZillaDumpCommand.java
index b98a35b2e6..4315b4ef5c 100644
--- a/incubator/command-dump/src/main/java/io/aklivity/zilla/runtime/command/dump/internal/airline/ZillaDumpCommand.java
+++ b/incubator/command-dump/src/main/java/io/aklivity/zilla/runtime/command/dump/internal/airline/ZillaDumpCommand.java
@@ -21,14 +21,16 @@
import static java.nio.file.StandardOpenOption.WRITE;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.agrona.LangUtil.rethrowUnchecked;
+import static org.agrona.concurrent.ringbuffer.RecordDescriptor.HEADER_LENGTH;
-import java.io.IOException;
+import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
@@ -60,6 +62,7 @@
import io.aklivity.zilla.runtime.command.dump.internal.airline.spy.RingBufferSpy;
import io.aklivity.zilla.runtime.command.dump.internal.types.Flyweight;
import io.aklivity.zilla.runtime.command.dump.internal.types.IPv6HeaderFW;
+import io.aklivity.zilla.runtime.command.dump.internal.types.IPv6JumboHeaderFW;
import io.aklivity.zilla.runtime.command.dump.internal.types.PcapGlobalHeaderFW;
import io.aklivity.zilla.runtime.command.dump.internal.types.PcapPacketHeaderFW;
import io.aklivity.zilla.runtime.command.dump.internal.types.TcpFlag;
@@ -90,7 +93,8 @@ public final class ZillaDumpCommand extends ZillaCommand
private static final long MIN_PARK_NS = MILLISECONDS.toNanos(1L);
private static final int MAX_YIELDS = 30;
private static final int MAX_SPINS = 20;
- private static final int BUFFER_SLOT_CAPACITY = 64 * 1024;
+ private static final int WRITE_BUFFER_SLOT_CAPACITY = 64 * 1024;
+ private static final int PATCH_BUFFER_SLOT_CAPACITY = 64 * 1024 + 85;
private static final int LABELS_BUFFER_SLOT_CAPACITY = 4 * 128;
private static final long PCAP_GLOBAL_MAGIC = 2712847316L;
@@ -99,10 +103,15 @@ public final class ZillaDumpCommand extends ZillaCommand
private static final int PCAP_GLOBAL_SIZE = 24;
private static final int PCAP_LINK_TYPE_IPV6 = 1;
- private static final byte[] PSEUDO_ETHERNET_FRAME = BitUtil.fromHex("2052454356002053454e440086dd");
- private static final int PSEUDO_IPV6_PREFIX = 1629561669;
- private static final short PSEUDO_NEXT_HEADER_AND_HOP_LIMIT = 0x0640;
+ private static final byte[] ETHERNET_FRAME = BitUtil.fromHex("2052454356002053454e440086dd");
+
+ private static final int IPV6_PREFIX = 0x61212345;
+ private static final byte IPV6_NEXT_HEADER_TCP = 0x06;
+ private static final byte IPV6_NEXT_HEADER_JUMBO = 0x00;
+ private static final byte IPV6_HOP_LIMIT = 0x40;
private static final long IPV6_LOCAL_ADDRESS = 0xfe80L << 48;
+ private static final int IPV6_JUMBO_PREFIX = 0x0600c204;
+ private static final int IPV6_JUMBO_THRESHOLD = 0xffff;
private static final int PCAP_HEADER_OFFSET = 0;
private static final int PCAP_HEADER_SIZE = 16;
@@ -115,6 +124,8 @@ public final class ZillaDumpCommand extends ZillaCommand
private static final int IPV6_HEADER_OFFSET = ETHER_HEADER_LIMIT;
private static final int IPV6_HEADER_SIZE = 40;
private static final int IPV6_HEADER_LIMIT = IPV6_HEADER_OFFSET + IPV6_HEADER_SIZE;
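+ // RFC 2675 jumbo payload option: a 4-byte option prefix plus a 32-bit payload length, appended after the fixed IPv6 header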
+ private static final int IPV6_JUMBO_HEADER_OFFSET = IPV6_HEADER_LIMIT;
+ private static final int IPV6_JUMBO_HEADER_SIZE = 8;
private static final int TCP_HEADER_OFFSET = IPV6_HEADER_LIMIT;
private static final int TCP_HEADER_SIZE = 20;
@@ -122,7 +133,9 @@ public final class ZillaDumpCommand extends ZillaCommand
private static final int ZILLA_HEADER_OFFSET = TCP_HEADER_LIMIT;
private static final int ZILLA_PROTOCOL_TYPE_OFFSET = ZILLA_HEADER_OFFSET + 4;
- private static final int ZILLA_HEADER_SIZE = 8;
+ private static final int ZILLA_WORKER_OFFSET = ZILLA_PROTOCOL_TYPE_OFFSET + 4;
+ private static final int ZILLA_OFFSET_OFFSET = ZILLA_WORKER_OFFSET + 4;
+ private static final int ZILLA_HEADER_SIZE = 16;
private static final int ZILLA_HEADER_LIMIT = ZILLA_HEADER_OFFSET + ZILLA_HEADER_SIZE;
private static final int TYPE_ID_INDEX = 0;
@@ -149,8 +162,13 @@ public final class ZillaDumpCommand extends ZillaCommand
description = "Dump specific namespaced bindings only, e.g example.http0,example.kafka0")
public List<String> bindings = new ArrayList<>();
- @Option(name = {"-o", "--output"},
- description = "PCAP output filename",
+ @Option(name = {"-i", "--install"},
+ description = "Install Zilla dissector to Wireshark plugin directory",
+ typeConverterProvider = ZillaDumpCommandPathConverterProvider.class)
+ public Path pluginDirectory;
+
+ @Option(name = {"-w", "--write"},
+ description = "Write output to PCAP file",
typeConverterProvider = ZillaDumpCommandPathConverterProvider.class)
public Path output;
@@ -168,6 +186,11 @@ public final class ZillaDumpCommand extends ZillaCommand
hidden = true)
public String propertiesPath;
+ @Option(name = "-e",
+ description = "Show exception traces",
+ hidden = true)
+ public boolean exceptions;
+
boolean continuous = true;
private final FrameFW frameRO = new FrameFW();
@@ -190,6 +213,7 @@ public final class ZillaDumpCommand extends ZillaCommand
private final FlushFW.Builder flushRW = new FlushFW.Builder();
private final ChallengeFW.Builder challengeRW = new ChallengeFW.Builder();
private final IPv6HeaderFW.Builder ipv6HeaderRW = new IPv6HeaderFW.Builder();
+ private final IPv6JumboHeaderFW.Builder ipv6JumboHeaderRW = new IPv6JumboHeaderFW.Builder();
private final TcpHeaderFW.Builder tcpHeaderRW = new TcpHeaderFW.Builder();
private final MutableDirectBuffer patchBuffer;
private final MutableDirectBuffer writeBuffer;
@@ -198,13 +222,37 @@ public final class ZillaDumpCommand extends ZillaCommand
public ZillaDumpCommand()
{
- this.patchBuffer = new UnsafeBuffer(ByteBuffer.allocate(BUFFER_SLOT_CAPACITY));
- this.writeBuffer = new UnsafeBuffer(ByteBuffer.allocate(BUFFER_SLOT_CAPACITY));
+ this.patchBuffer = new UnsafeBuffer(ByteBuffer.allocate(PATCH_BUFFER_SLOT_CAPACITY));
+ this.writeBuffer = new UnsafeBuffer(ByteBuffer.allocate(WRITE_BUFFER_SLOT_CAPACITY));
}
@Override
public void run()
{
+ if (pluginDirectory != null)
+ {
+ try
+ {
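+ // load the bundled dissector from the classpath (filtered by Maven so @version@ is resolved) and copy it into place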
+ InputStream is = getClass().getResourceAsStream("zilla.lua");
+ Files.createDirectories(pluginDirectory);
+ Path target = pluginDirectory.resolve("zilla.lua");
+ Files.copy(is, target, StandardCopyOption.REPLACE_EXISTING);
+ if (verbose)
+ {
+ System.out.printf("Copied Wireshark plugin to the directory: %s%n", pluginDirectory);
+ }
+ }
+ catch (Exception ex)
+ {
+ System.out.printf("Failed to copy the Wireshark plugin to the directory: %s%n", pluginDirectory);
+ if (exceptions)
+ {
+ ex.printStackTrace();
+ }
+ rethrowUnchecked(ex);
+ }
+ }
+
Properties props = new Properties();
props.setProperty(ENGINE_DIRECTORY.name(), ".zilla/engine");
@@ -215,9 +263,13 @@ public void run()
{
props.load(Files.newInputStream(path));
}
- catch (IOException ex)
+ catch (Exception ex)
{
- System.out.println("Failed to load properties: " + path);
+ System.out.printf("Failed to load properties: %s%n", path);
+ if (exceptions)
+ {
+ ex.printStackTrace();
+ }
rethrowUnchecked(ex);
}
}
@@ -252,6 +304,7 @@ public void run()
{
final RingBufferSpy[] streamBuffers = files
.filter(this::isStreamsFile)
+ .sorted()
.peek(this::onDiscovered)
.map(this::createStreamBuffer)
.collect(Collectors.toList())
@@ -260,8 +313,11 @@ public void run()
final IdleStrategy idleStrategy = new BackoffIdleStrategy(MAX_SPINS, MAX_YIELDS, MIN_PARK_NS, MAX_PARK_NS);
final BindingsLayoutReader bindings = BindingsLayoutReader.builder().directory(directory).build();
- final DumpHandler dumpHandler = new DumpHandler(filter, labels::lookupLabel, bindings.bindings()::get, writer);
- final MessagePredicate spyHandler = dumpHandler::handleFrame;
+ final DumpHandler[] dumpHandlers = new DumpHandler[streamBufferCount];
+ for (int i = 0; i < streamBufferCount; i++)
+ {
+ dumpHandlers[i] = new DumpHandler(i, filter, labels::lookupLabel, bindings.bindings()::get, writer);
+ }
final MutableDirectBuffer buffer = writeBuffer;
encodePcapGlobal(buffer);
@@ -275,13 +331,18 @@ public void run()
for (int i = 0; i < streamBufferCount; i++)
{
final RingBufferSpy streamBuffer = streamBuffers[i];
+ MessagePredicate spyHandler = dumpHandlers[i]::handleFrame;
workCount += streamBuffer.spy(spyHandler, 1);
}
idleStrategy.idle(workCount);
} while (workCount != exitWorkCount);
}
- catch (IOException ex)
+ catch (Exception ex)
{
+ if (exceptions)
+ {
+ ex.printStackTrace();
+ }
rethrowUnchecked(ex);
}
}
@@ -319,7 +380,7 @@ private void onDiscovered(
{
if (verbose)
{
- System.out.printf("Discovered: %s\n", path);
+ System.out.printf("Discovered: %s%n", path);
}
}
@@ -351,9 +412,13 @@ private void writePcapOutput(
byteBuf.limit(offset + length);
writer.write(byteBuf);
}
- catch (IOException ex)
+ catch (Exception ex)
{
- System.out.println("Could not write to file. Reason: " + ex.getMessage());
+ System.out.printf("Could not write to file. Reason: %s%n", ex.getMessage());
+ if (exceptions)
+ {
+ ex.printStackTrace();
+ }
rethrowUnchecked(ex);
}
}
@@ -372,6 +437,7 @@ private static int localId(
private final class DumpHandler
{
+ private final int worker;
private final LongPredicate allowedBinding;
private final WritableByteChannel writer;
private final IntFunction<String> lookupLabel;
@@ -385,11 +451,13 @@ private final class DumpHandler
private long nextTimestamp = Long.MAX_VALUE;
private DumpHandler(
+ int worker,
LongPredicate allowedBinding,
IntFunction<String> lookupLabel,
Function<Long, long[]> lookupBindingInfo,
WritableByteChannel writer)
{
+ this.worker = worker;
this.allowedBinding = allowedBinding;
this.lookupLabel = lookupLabel;
this.lookupBindingInfo = lookupBindingInfo;
@@ -471,14 +539,15 @@ private void onBegin(
{
if (allowedBinding.test(begin.routedId()))
{
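+ // rewind past the ring buffer record header so offset locates this frame's record in the worker's stream buffer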
+ int offset = begin.offset() - HEADER_LENGTH;
final BeginFW newBegin = beginRW.wrap(patchBuffer, 0, begin.sizeof()).set(begin).build();
final ExtensionFW extension = newBegin.extension().get(extensionRO::tryWrap);
patchExtension(patchBuffer, extension, BeginFW.FIELD_OFFSET_EXTENSION);
final boolean initial = begin.streamId() % 2 != 0;
short tcpFlags = initial ? PSH_ACK_SYN : PSH_ACK;
- writeFrame(BeginFW.TYPE_ID, newBegin.originId(), newBegin.routedId(), newBegin.streamId(), newBegin.timestamp(),
- newBegin, tcpFlags);
+ writeFrame(BeginFW.TYPE_ID, worker, offset, newBegin.originId(), newBegin.routedId(), newBegin.streamId(),
+ newBegin.timestamp(), newBegin, tcpFlags);
}
}
@@ -487,12 +556,14 @@ private void onData(
{
if (allowedBinding.test(data.routedId()))
{
+ int offset = data.offset() - HEADER_LENGTH;
final DataFW newData = dataRW.wrap(patchBuffer, 0, data.sizeof()).set(data).build();
final ExtensionFW extension = newData.extension().get(extensionRO::tryWrap);
- patchExtension(patchBuffer, extension, DataFW.FIELD_OFFSET_EXTENSION);
+ int extensionOffset = DataFW.FIELD_OFFSET_PAYLOAD + Math.max(newData.length(), 0) + DataFW.FIELD_OFFSET_EXTENSION;
+ patchExtension(patchBuffer, extension, extensionOffset);
- writeFrame(DataFW.TYPE_ID, newData.originId(), newData.routedId(), newData.streamId(), newData.timestamp(),
- newData, PSH_ACK);
+ writeFrame(DataFW.TYPE_ID, worker, offset, newData.originId(), newData.routedId(), newData.streamId(),
+ newData.timestamp(), newData, PSH_ACK);
}
}
@@ -501,12 +572,13 @@ private void onEnd(
{
if (allowedBinding.test(end.routedId()))
{
+ int offset = end.offset() - HEADER_LENGTH;
final EndFW newEnd = endRW.wrap(patchBuffer, 0, end.sizeof()).set(end).build();
final ExtensionFW extension = newEnd.extension().get(extensionRO::tryWrap);
patchExtension(patchBuffer, extension, EndFW.FIELD_OFFSET_EXTENSION);
- writeFrame(EndFW.TYPE_ID, newEnd.originId(), newEnd.routedId(), newEnd.streamId(), newEnd.timestamp(),
- newEnd, PSH_ACK_FIN);
+ writeFrame(EndFW.TYPE_ID, worker, offset, newEnd.originId(), newEnd.routedId(), newEnd.streamId(),
+ newEnd.timestamp(), newEnd, PSH_ACK_FIN);
}
}
@@ -515,12 +587,13 @@ private void onAbort(
{
if (allowedBinding.test(abort.routedId()))
{
+ int offset = abort.offset() - HEADER_LENGTH;
final AbortFW newAbort = abortRW.wrap(patchBuffer, 0, abort.sizeof()).set(abort).build();
final ExtensionFW extension = newAbort.extension().get(extensionRO::tryWrap);
patchExtension(patchBuffer, extension, AbortFW.FIELD_OFFSET_EXTENSION);
- writeFrame(AbortFW.TYPE_ID, newAbort.originId(), newAbort.routedId(), newAbort.streamId(), newAbort.timestamp(),
- newAbort, PSH_ACK_FIN);
+ writeFrame(AbortFW.TYPE_ID, worker, offset, newAbort.originId(), newAbort.routedId(), newAbort.streamId(),
+ newAbort.timestamp(), newAbort, PSH_ACK_FIN);
}
}
@@ -529,8 +602,9 @@ private void onWindow(
{
if (allowedBinding.test(window.routedId()))
{
- writeFrame(WindowFW.TYPE_ID, window.originId(), window.routedId(), window.streamId(), window.timestamp(), window,
- PSH_ACK);
+ int offset = window.offset() - HEADER_LENGTH;
+ writeFrame(WindowFW.TYPE_ID, worker, offset, window.originId(), window.routedId(), window.streamId(),
+ window.timestamp(), window, PSH_ACK);
}
}
@@ -539,12 +613,13 @@ private void onReset(
{
if (allowedBinding.test(reset.routedId()))
{
+ int offset = reset.offset() - HEADER_LENGTH;
final ResetFW newReset = resetRW.wrap(patchBuffer, 0, reset.sizeof()).set(reset).build();
final ExtensionFW extension = newReset.extension().get(extensionRO::tryWrap);
patchExtension(patchBuffer, extension, ResetFW.FIELD_OFFSET_EXTENSION);
- writeFrame(ResetFW.TYPE_ID, newReset.originId(), newReset.routedId(), newReset.streamId(), newReset.timestamp(),
- newReset, PSH_ACK_FIN);
+ writeFrame(ResetFW.TYPE_ID, worker, offset, newReset.originId(), newReset.routedId(), newReset.streamId(),
+ newReset.timestamp(), newReset, PSH_ACK_FIN);
}
}
@@ -553,12 +628,13 @@ private void onFlush(
{
if (allowedBinding.test(flush.routedId()))
{
+ int offset = flush.offset() - HEADER_LENGTH;
final FlushFW newFlush = flushRW.wrap(patchBuffer, 0, flush.sizeof()).set(flush).build();
final ExtensionFW extension = newFlush.extension().get(extensionRO::tryWrap);
patchExtension(patchBuffer, extension, FlushFW.FIELD_OFFSET_EXTENSION);
- writeFrame(FlushFW.TYPE_ID, newFlush.originId(), newFlush.routedId(), newFlush.streamId(), newFlush.timestamp(),
- newFlush, PSH_ACK);
+ writeFrame(FlushFW.TYPE_ID, worker, offset, newFlush.originId(), newFlush.routedId(), newFlush.streamId(),
+ newFlush.timestamp(), newFlush, PSH_ACK);
}
}
@@ -567,8 +643,9 @@ private void onSignal(
{
if (allowedBinding.test(signal.routedId()))
{
- writeFrame(SignalFW.TYPE_ID, signal.originId(), signal.routedId(), signal.streamId(), signal.timestamp(), signal,
- PSH_ACK);
+ int offset = signal.offset() - HEADER_LENGTH;
+ writeFrame(SignalFW.TYPE_ID, worker, offset, signal.originId(), signal.routedId(), signal.streamId(),
+ signal.timestamp(), signal, PSH_ACK);
}
}
@@ -577,12 +654,13 @@ private void onChallenge(
{
if (allowedBinding.test(challenge.routedId()))
{
+ int offset = challenge.offset() - HEADER_LENGTH;
final ChallengeFW newChallenge = challengeRW.wrap(patchBuffer, 0, challenge.sizeof()).set(challenge).build();
final ExtensionFW extension = newChallenge.extension().get(extensionRO::tryWrap);
patchExtension(patchBuffer, extension, ChallengeFW.FIELD_OFFSET_EXTENSION);
- writeFrame(ChallengeFW.TYPE_ID, newChallenge.originId(), newChallenge.routedId(), newChallenge.streamId(),
- newChallenge.timestamp(), newChallenge, PSH_ACK);
+ writeFrame(ChallengeFW.TYPE_ID, worker, offset, newChallenge.originId(), newChallenge.routedId(),
+ newChallenge.streamId(), newChallenge.timestamp(), newChallenge, PSH_ACK);
}
}
@@ -634,6 +712,8 @@ private byte[] resolveLabelAsBytes(
private void writeFrame(
int frameTypeId,
+ int worker,
+ int offset,
long originId,
long routedId,
long streamId,
@@ -644,22 +724,24 @@ private void writeFrame(
final int labelsLength = encodeZillaLabels(labelsBuffer, originId, routedId);
final int tcpSegmentLength = ZILLA_HEADER_SIZE + labelsLength + frame.sizeof();
final int ipv6Length = TCP_HEADER_SIZE + tcpSegmentLength;
- final int pcapLength = ETHER_HEADER_SIZE + IPV6_HEADER_SIZE + ipv6Length;
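+ // the IPv6 payload length field is only 16 bits, so frames above 0xffff bytes are encoded as jumbograms (RFC 2675)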
+ final boolean jumbo = ipv6Length > IPV6_JUMBO_THRESHOLD;
+ final int ipv6JumboLength = jumbo ? IPV6_JUMBO_HEADER_SIZE : 0;
+ final int pcapLength = ETHER_HEADER_SIZE + IPV6_HEADER_SIZE + ipv6Length + ipv6JumboLength;
encodePcapHeader(writeBuffer, pcapLength, timestamp);
encodeEtherHeader(writeBuffer);
- encodeIpv6Header(writeBuffer, streamId ^ 1L, streamId, ipv6Length);
+ encodeIpv6Header(writeBuffer, jumbo, streamId ^ 1L, streamId, ipv6Length);
final boolean initial = streamId % 2 != 0;
final long seq = sequence.get(streamId);
final long ack = sequence.get(streamId ^ 1L);
sequence.put(streamId, sequence.get(streamId) + tcpSegmentLength);
- encodeTcpHeader(writeBuffer, initial, seq, ack, tcpFlags);
+ encodeTcpHeader(writeBuffer, ipv6JumboLength, initial, seq, ack, tcpFlags);
final int protocolTypeId = resolveProtocolTypeId(originId, routedId);
- encodeZillaHeader(writeBuffer, frameTypeId, protocolTypeId);
+ encodeZillaHeader(writeBuffer, ipv6JumboLength, frameTypeId, protocolTypeId, worker, offset);
- writePcapOutput(writer, writeBuffer, PCAP_HEADER_OFFSET, ZILLA_HEADER_LIMIT);
+ writePcapOutput(writer, writeBuffer, PCAP_HEADER_OFFSET, ZILLA_HEADER_LIMIT + ipv6JumboLength);
writePcapOutput(writer, labelsBuffer, 0, labelsLength);
writePcapOutput(writer, frame.buffer(), frame.offset(), frame.sizeof());
}
@@ -704,28 +786,52 @@ private void encodePcapHeader(
private void encodeEtherHeader(
MutableDirectBuffer buffer)
{
- buffer.putBytes(ETHER_HEADER_OFFSET, PSEUDO_ETHERNET_FRAME);
+ buffer.putBytes(ETHER_HEADER_OFFSET, ETHERNET_FRAME);
}
private void encodeIpv6Header(
MutableDirectBuffer buffer,
+ boolean jumbo,
long source,
long destination,
int payloadLength)
{
- ipv6HeaderRW.wrap(buffer, IPV6_HEADER_OFFSET, buffer.capacity())
- .prefix(PSEUDO_IPV6_PREFIX)
- .payload_length((short) payloadLength)
- .next_header_and_hop_limit(PSEUDO_NEXT_HEADER_AND_HOP_LIMIT)
- .src_addr_part1(IPV6_LOCAL_ADDRESS)
- .src_addr_part2(source)
- .dst_addr_part1(IPV6_LOCAL_ADDRESS)
- .dst_addr_part2(destination)
- .build();
+ long addrPart1 = IPV6_LOCAL_ADDRESS | worker;
+ if (jumbo)
+ {
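+ // per RFC 2675 the fixed header's payload_length is zero and the jumbo option carries the actual length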
+ ipv6HeaderRW.wrap(buffer, IPV6_HEADER_OFFSET, buffer.capacity())
+ .prefix(IPV6_PREFIX)
+ .payload_length((short) 0)
+ .next_header(IPV6_NEXT_HEADER_JUMBO)
+ .hop_limit(IPV6_HOP_LIMIT)
+ .src_addr_part1(addrPart1)
+ .src_addr_part2(source)
+ .dst_addr_part1(addrPart1)
+ .dst_addr_part2(destination)
+ .build();
+ ipv6JumboHeaderRW.wrap(buffer, IPV6_JUMBO_HEADER_OFFSET, buffer.capacity())
+ .prefix(IPV6_JUMBO_PREFIX)
+ .payload_length(payloadLength + IPV6_JUMBO_HEADER_SIZE)
+ .build();
+ }
+ else
+ {
+ ipv6HeaderRW.wrap(buffer, IPV6_HEADER_OFFSET, buffer.capacity())
+ .prefix(IPV6_PREFIX)
+ .payload_length((short) payloadLength)
+ .next_header(IPV6_NEXT_HEADER_TCP)
+ .hop_limit(IPV6_HOP_LIMIT)
+ .src_addr_part1(addrPart1)
+ .src_addr_part2(source)
+ .dst_addr_part1(addrPart1)
+ .dst_addr_part2(destination)
+ .build();
+ }
}
private void encodeTcpHeader(
MutableDirectBuffer buffer,
+ int ipv6JumboLength,
boolean initial,
long sequence,
long acknowledge,
@@ -735,7 +841,7 @@ private void encodeTcpHeader(
short sourcePort = initial ? TCP_SRC_PORT : TCP_DEST_PORT;
short destPort = initial ? TCP_DEST_PORT : TCP_SRC_PORT;
- tcpHeaderRW.wrap(buffer, TCP_HEADER_OFFSET, buffer.capacity())
+ tcpHeaderRW.wrap(buffer, TCP_HEADER_OFFSET + ipv6JumboLength, buffer.capacity())
.src_port(sourcePort)
.dst_port(destPort)
.sequence_number((int) sequence)
@@ -749,11 +855,16 @@ private void encodeTcpHeader(
private void encodeZillaHeader(
MutableDirectBuffer buffer,
+ int ipv6JumboLength,
int frameTypeId,
- int protocolTypeId)
+ int protocolTypeId,
+ int worker,
+ int offset)
{
- buffer.putInt(ZILLA_HEADER_OFFSET, frameTypeId);
- buffer.putInt(ZILLA_PROTOCOL_TYPE_OFFSET, protocolTypeId);
+ buffer.putInt(ZILLA_HEADER_OFFSET + ipv6JumboLength, frameTypeId);
+ buffer.putInt(ZILLA_PROTOCOL_TYPE_OFFSET + ipv6JumboLength, protocolTypeId);
+ buffer.putInt(ZILLA_WORKER_OFFSET + ipv6JumboLength, worker);
+ buffer.putInt(ZILLA_OFFSET_OFFSET + ipv6JumboLength, offset);
}
private int encodeZillaLabels(
diff --git a/incubator/command-dump/src/main/lua/zilla.lua b/incubator/command-dump/src/main/lua/zilla.lua
deleted file mode 100644
index fc5d06ac36..0000000000
--- a/incubator/command-dump/src/main/lua/zilla.lua
+++ /dev/null
@@ -1,418 +0,0 @@
---[[
-
- Copyright 2021-2023 Aklivity Inc
-
- Licensed under the Aklivity Community License (the "License"); you may not use
- this file except in compliance with the License. You may obtain a copy of the
- License at
-
- https://www.aklivity.io/aklivity-community-license/
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
-
-]]
-zilla_protocol = Proto("Zilla", "Zilla Frames")
-
-HEADER_OFFSET = 0
-LABELS_OFFSET = 8
-
-BEGIN_ID = 0x00000001
-DATA_ID = 0x00000002
-END_ID = 0x00000003
-ABORT_ID = 0x00000004
-FLUSH_ID = 0x00000005
-RESET_ID = 0x40000001
-WINDOW_ID = 0x40000002
-SIGNAL_ID = 0x40000003
-CHALLENGE_ID = 0x40000004
-
-AMQP_ID = 0x112dc182
-GRPC_ID = 0xf9c7583a
-HTTP_ID = 0x8ab62046
-KAFKA_ID = 0x084b20e1
-MQTT_ID = 0xd0d41a76
-PROXY_ID = 0x8dcea850
-TLS_ID = 0x99f321bc
-
-local flags_types = {
- [0] = "Not set",
- [1] = "Set"
-}
-
-local fields = {
- -- header
- frame_type_id = ProtoField.uint32("zilla.frame_type_id", "Frame Type ID", base.HEX),
- frame_type = ProtoField.string("zilla.frame_type", "Frame Type", base.NONE),
- protocol_type_id = ProtoField.uint32("zilla.protocol_type_id", "Protocol Type ID", base.HEX),
- protocol_type = ProtoField.string("zilla.protocol_type", "Protocol Type", base.NONE),
- stream_type_id = ProtoField.uint32("zilla.stream_type_id", "Stream Type ID", base.HEX),
- stream_type = ProtoField.string("zilla.stream_type", "Stream Type", base.NONE),
-
- -- labels
- origin_namespace = ProtoField.string("zilla.origin_namespace", "Origin Namespace", base.STRING),
- origin_binding = ProtoField.string("zilla.origin_binding", "Origin Binding", base.STRING),
- routed_namespace = ProtoField.string("zilla.routed_namespace", "Routed Namespace", base.STRING),
- routed_binding = ProtoField.string("zilla.routed_binding", "Routed Binding", base.STRING),
-
- -- all frames
- origin_id = ProtoField.uint64("zilla.origin_id", "Origin ID", base.HEX),
- routed_id = ProtoField.uint64("zilla.routed_id", "Routed ID", base.HEX),
- stream_id = ProtoField.uint64("zilla.stream_id", "Stream ID", base.HEX),
- direction = ProtoField.string("zilla.direction", "Direction", base.NONE),
- initial_id = ProtoField.uint64("zilla.initial_id", "Initial ID", base.HEX),
- reply_id = ProtoField.uint64("zilla.reply_id", "Reply ID", base.HEX),
- sequence = ProtoField.int64("zilla.sequence", "Sequence", base.DEC),
- acknowledge = ProtoField.int64("zilla.acknowledge", "Acknowledge", base.DEC),
- maximum = ProtoField.int32("zilla.maximum", "Maximum", base.DEC),
- timestamp = ProtoField.uint64("zilla.timestamp", "Timestamp", base.HEX),
- trace_id = ProtoField.uint64("zilla.trace_id", "Trace ID", base.HEX),
- authorization = ProtoField.uint64("zilla.authorization", "Authorization", base.HEX),
-
- -- almost all frames
- extension = ProtoField.bytes("zilla.extension", "Extension", base.NONE),
-
- -- begin frame
- affinity = ProtoField.uint64("zilla.affinity", "Affinity", base.HEX),
-
- -- data frame
- flags = ProtoField.uint8("zilla.flags", "Flags", base.HEX),
- flags_fin = ProtoField.uint8("zilla.flags_fin", "FIN", base.DEC, flags_types, 0x01),
- flags_init = ProtoField.uint8("zilla.flags_init", "INIT", base.DEC, flags_types, 0x02),
- flags_incomplete = ProtoField.uint8("zilla.flags_incomplete", "INCOMPLETE", base.DEC, flags_types, 0x04),
- flags_skip = ProtoField.uint8("zilla.flags_skip", "SKIP", base.DEC, flags_types, 0x08),
- budget_id = ProtoField.uint64("zilla.budget_id", "Budget ID", base.HEX),
- reserved = ProtoField.int32("zilla.reserved", "Reserved", base.DEC),
- length = ProtoField.int32("zilla.length", "Length", base.DEC),
- progress = ProtoField.int64("zilla.progress", "Progress", base.DEC),
- progress_maximum = ProtoField.string("zilla.progress_maximum", "Progress/Maximum", base.NONE),
- payload = ProtoField.protocol("zilla.payload", "Payload", base.HEX),
-
- -- window frame
- padding = ProtoField.int32("zilla.padding", "Padding", base.DEC),
- minimum = ProtoField.int32("zilla.minimum", "Minimum", base.DEC),
- capabilities = ProtoField.uint8("zilla.capabilities", "Capabilities", base.HEX),
-
- -- signal frame
- cancel_id = ProtoField.uint64("zilla.cancel_id", "Cancel ID", base.HEX),
- signal_id = ProtoField.int32("zilla.signal_id", "Signal ID", base.DEC),
- context_id = ProtoField.int32("zilla.context_id", "Context ID", base.DEC),
-}
-
-zilla_protocol.fields = fields;
-
-function zilla_protocol.dissector(buffer, pinfo, tree)
- if buffer:len() == 0 then return end
-
- local subtree = tree:add(zilla_protocol, buffer(), "Zilla Frame")
- local slices = {}
-
- -- header
- slices.frame_type_id = buffer(HEADER_OFFSET, 4)
- local frame_type_id = slices.frame_type_id:le_uint()
- local frame_type = resolve_frame_type(frame_type_id)
- subtree:add_le(fields.frame_type_id, slices.frame_type_id)
- subtree:add(fields.frame_type, frame_type)
-
- slices.protocol_type_id = buffer(HEADER_OFFSET + 4, 4)
- local protocol_type_id = slices.protocol_type_id:le_uint()
- local protocol_type = resolve_type(protocol_type_id)
- subtree:add_le(fields.protocol_type_id, slices.protocol_type_id)
- subtree:add(fields.protocol_type, protocol_type)
-
- -- labels
- slices.labels_length = buffer(LABELS_OFFSET, 4)
- local labels_length = slices.labels_length:le_uint()
- slices.labels = buffer(LABELS_OFFSET + 4, labels_length)
-
- -- origin id
- local frame_offset = LABELS_OFFSET + labels_length
- slices.origin_id = buffer(frame_offset + 4, 8)
- subtree:add_le(fields.origin_id, slices.origin_id)
-
- local label_offset = LABELS_OFFSET + 4;
- local origin_namespace_length = buffer(label_offset, 4):le_uint()
- label_offset = label_offset + 4
- slices.origin_namespace = buffer(label_offset, origin_namespace_length)
- label_offset = label_offset + origin_namespace_length
- if (origin_namespace_length > 0) then
- subtree:add(fields.origin_namespace, slices.origin_namespace)
- end
-
- local origin_binding_length = buffer(label_offset, 4):le_uint()
- label_offset = label_offset + 4
- slices.origin_binding = buffer(label_offset, origin_binding_length)
- label_offset = label_offset + origin_binding_length
- if (origin_binding_length > 0) then
- subtree:add(fields.origin_binding, slices.origin_binding)
- end
-
- -- routed id
- slices.routed_id = buffer(frame_offset + 12, 8)
- subtree:add_le(fields.routed_id, slices.routed_id)
-
- local routed_namespace_length = buffer(label_offset, 4):le_uint()
- label_offset = label_offset + 4
- slices.routed_namespace = buffer(label_offset, routed_namespace_length)
- label_offset = label_offset + routed_namespace_length
- if (routed_namespace_length > 0) then
- subtree:add(fields.routed_namespace, slices.routed_namespace)
- end
-
- local routed_binding_length = buffer(label_offset, 4):le_uint()
- label_offset = label_offset + 4
- slices.routed_binding = buffer(label_offset, routed_binding_length)
- label_offset = label_offset + routed_binding_length
- if (routed_binding_length > 0) then
- subtree:add(fields.routed_binding, slices.routed_binding)
- end
-
- -- stream id
- slices.stream_id = buffer(frame_offset + 20, 8)
- subtree:add_le(fields.stream_id, slices.stream_id)
- local stream_id = slices.stream_id:le_uint64();
- local direction
- local initial_id
- local reply_id
- if stream_id == UInt64(0) then
- direction = ""
- else
- if (stream_id % 2) == UInt64(0) then
- direction = "REP"
- initial_id = stream_id + UInt64(1)
- reply_id = stream_id
- else
- direction = "INI"
- initial_id = stream_id
- reply_id = stream_id - UInt64(1)
- end
- subtree:add(fields.initial_id, initial_id)
- subtree:add(fields.reply_id, reply_id)
- end
- subtree:add(fields.direction, direction)
-
- -- more frame properties
- slices.sequence = buffer(frame_offset + 28, 8)
- subtree:add_le(fields.sequence, slices.sequence)
- slices.acknowledge = buffer(frame_offset + 36, 8)
- subtree:add_le(fields.acknowledge, slices.acknowledge)
- slices.maximum = buffer(frame_offset + 44, 4)
- subtree:add_le(fields.maximum, slices.maximum)
- slices.timestamp = buffer(frame_offset + 48, 8)
- subtree:add_le(fields.timestamp, slices.timestamp)
- slices.trace_id = buffer(frame_offset + 56, 8)
- subtree:add_le(fields.trace_id, slices.trace_id)
- slices.authorization = buffer(frame_offset + 64, 8)
- subtree:add_le(fields.authorization, slices.authorization)
-
- pinfo.cols.protocol = zilla_protocol.name
- local info = "ZILLA " .. frame_type .. " " .. direction
- if protocol_type and protocol_type ~= "" then
- info = info .. " p=" .. protocol_type
- end
- pinfo.cols.info:set(info)
-
- -- begin
- if frame_type_id == BEGIN_ID then
- slices.affinity = buffer(frame_offset + 72, 8)
- subtree:add_le(fields.affinity, slices.affinity)
- handle_extension(buffer, slices, subtree, pinfo, info, frame_offset + 80)
- end
-
- -- data
- if frame_type_id == DATA_ID then
- slices.flags = buffer(frame_offset + 72, 1)
- local flags_label = string.format("Flags: 0x%02x", slices.flags:le_uint())
- local flagsSubtree = subtree:add(zilla_protocol, buffer(), flags_label)
- flagsSubtree:add_le(fields.flags_fin, slices.flags)
- flagsSubtree:add_le(fields.flags_init, slices.flags)
- flagsSubtree:add_le(fields.flags_incomplete, slices.flags)
- flagsSubtree:add_le(fields.flags_skip, slices.flags)
- slices.budget_id = buffer(frame_offset + 73, 8)
- subtree:add_le(fields.budget_id, slices.budget_id)
- slices.reserved = buffer(frame_offset + 81, 4)
- subtree:add_le(fields.reserved, slices.reserved)
-
- local sequence = slices.sequence:le_int64();
- local acknowledge = slices.acknowledge:le_int64();
- local maximum = slices.maximum:le_int();
- local reserved = slices.reserved:le_int();
- local progress = sequence - acknowledge + reserved;
- local progress_maximum = progress .. "/" .. maximum
- subtree:add(fields.progress, progress)
- subtree:add(fields.progress_maximum, progress_maximum)
- pinfo.cols.info:set(info .. " [" .. progress_maximum .. "]")
-
- local payloadSubtree = subtree:add(zilla_protocol, buffer(), "Payload")
- slices.length = buffer(frame_offset + 85, 4)
- local length = slices.length:le_int()
- slices.payload = buffer(frame_offset + 89, length)
- payloadSubtree:add_le(fields.length, slices.length)
- payloadSubtree:add(fields.payload, slices.payload)
- handle_extension(buffer, slices, subtree, pinfo, info, frame_offset + 89 + length)
-
- local dissector = resolve_dissector(protocol_type, slices.payload:tvb())
- if dissector then
- dissector:call(slices.payload:tvb(), pinfo, tree)
- end
- end
-
- -- end
- if frame_type_id == END_ID then
- handle_extension(buffer, slices, subtree, pinfo, info, frame_offset + 72)
- end
-
- -- abort
- if frame_type_id == ABORT_ID then
- handle_extension(buffer, slices, subtree, pinfo, info, frame_offset + 72)
- end
-
- -- flush
- if frame_type_id == FLUSH_ID then
- slices.budget_id = buffer(frame_offset + 72, 8)
- subtree:add_le(fields.budget_id, slices.budget_id)
- slices.reserved = buffer(frame_offset + 80, 4)
- subtree:add_le(fields.reserved, slices.reserved)
- handle_extension(buffer, slices, subtree, pinfo, info, frame_offset + 84)
- end
-
- -- reset
- if frame_type_id == RESET_ID then
- handle_extension(buffer, slices, subtree, pinfo, info, frame_offset + 72)
- end
-
- -- window
- if frame_type_id == WINDOW_ID then
- slices.budget_id = buffer(frame_offset + 72, 8)
- subtree:add_le(fields.budget_id, slices.budget_id)
- slices.padding = buffer(frame_offset + 80, 4)
- subtree:add_le(fields.padding, slices.padding)
- slices.minimum = buffer(frame_offset + 84, 4)
- subtree:add_le(fields.minimum, slices.minimum)
- slices.capabilities = buffer(frame_offset + 88, 1)
- subtree:add_le(fields.capabilities, slices.capabilities)
-
- local sequence = slices.sequence:le_int64();
- local acknowledge = slices.acknowledge:le_int64();
- local maximum = slices.maximum:le_int();
- local progress = sequence - acknowledge;
- local progress_maximum = progress .. "/" .. maximum
- subtree:add(fields.progress, progress)
- subtree:add(fields.progress_maximum, progress_maximum)
-
- pinfo.cols.info:set(info .. " [" .. progress_maximum .. "]")
- end
-
- -- signal
- if frame_type_id == SIGNAL_ID then
- slices.cancel_id = buffer(frame_offset + 72, 8)
- subtree:add_le(fields.cancel_id, slices.cancel_id)
- slices.signal_id = buffer(frame_offset + 80, 4)
- subtree:add_le(fields.signal_id, slices.signal_id)
- slices.context_id = buffer(frame_offset + 84, 4)
- subtree:add_le(fields.context_id, slices.context_id)
-
- local payloadSubtree = subtree:add(zilla_protocol, buffer(), "Payload")
- slices.length = buffer(frame_offset + 88, 4)
- local length = slices.length:le_int()
- slices.payload = buffer(frame_offset + 92, length)
- payloadSubtree:add_le(fields.length, slices.length)
- payloadSubtree:add(fields.payload, slices.payload)
- end
-
- -- challenge
- if frame_type_id == CHALLENGE_ID then
- handle_extension(buffer, slices, subtree, pinfo, info, frame_offset + 72)
- end
-end
-
-function resolve_frame_type(frame_type_id)
- local frame_type = ""
- if frame_type_id == BEGIN_ID then frame_type = "BEGIN"
- elseif frame_type_id == DATA_ID then frame_type = "DATA"
- elseif frame_type_id == END_ID then frame_type = "END"
- elseif frame_type_id == ABORT_ID then frame_type = "ABORT"
- elseif frame_type_id == FLUSH_ID then frame_type = "FLUSH"
- elseif frame_type_id == RESET_ID then frame_type = "RESET"
- elseif frame_type_id == WINDOW_ID then frame_type = "WINDOW"
- elseif frame_type_id == SIGNAL_ID then frame_type = "SIGNAL"
- elseif frame_type_id == CHALLENGE_ID then frame_type = "CHALLENGE"
- end
- return frame_type
-end
-
-function handle_extension(buffer, slices, subtree, pinfo, info, offset)
- if buffer:len() > offset then
- local extensionSubtree = subtree:add(zilla_protocol, buffer(), "Extension")
- slices.stream_type_id = buffer(offset, 4)
- extensionSubtree:add(fields.stream_type_id, slices.stream_type_id)
-
- local stream_type_id = slices.stream_type_id:le_uint();
- local stream_type = resolve_type(stream_type_id)
- extensionSubtree:add(fields.stream_type, stream_type)
-
- slices.extension = buffer(offset)
- extensionSubtree:add(fields.extension, slices.extension)
-
- if stream_type and stream_type ~= "" then
- pinfo.cols.info:set(info .. " s=" .. stream_type)
- end
- end
-end
-
-function resolve_type(type_id)
- local type = ""
- if type_id == AMQP_ID then type = "amqp"
- elseif type_id == GRPC_ID then type = "grpc"
- elseif type_id == HTTP_ID then type = "http"
- elseif type_id == KAFKA_ID then type = "kafka"
- elseif type_id == MQTT_ID then type = "mqtt"
- elseif type_id == PROXY_ID then type = "proxy"
- elseif type_id == TLS_ID then type = "tls"
- end
- return type
-end
-
-function resolve_dissector(protocol_type, payload)
- local dissector
- if protocol_type == "amqp" then dissector = Dissector.get("amqp")
- elseif protocol_type == "http" then dissector = resolve_http_dissector(payload)
- elseif protocol_type == "kafka" then dissector = Dissector.get("kafka")
- elseif protocol_type == "mqtt" then dissector = Dissector.get("mqtt")
- elseif protocol_type == "tls" then dissector = Dissector.get("tls")
- end
- return dissector
-end
-
-function resolve_http_dissector(payload)
- if payload:range(0, 3):int() + 9 == payload:len() then
- return Dissector.get("http2")
- elseif payload:range(0, 3):string() == "PRI" then
- return Dissector.get("http2")
- elseif payload:range(0, 4):string() == "HTTP" then
- return Dissector.get("http")
- elseif payload:range(0, 3):string() == "GET" then
- return Dissector.get("http")
- elseif payload:range(0, 4):string() == "POST" then
- return Dissector.get("http")
- elseif payload:range(0, 3):string() == "PUT" then
- return Dissector.get("http")
- elseif payload:range(0, 6):string() == "DELETE" then
- return Dissector.get("http")
- elseif payload:range(0, 4):string() == "HEAD" then
- return Dissector.get("http")
- elseif payload:range(0, 7):string() == "OPTIONS" then
- return Dissector.get("http")
- elseif payload:range(0, 5):string() == "TRACE" then
- return Dissector.get("http")
- elseif payload:range(0, 7):string() == "CONNECT" then
- return Dissector.get("http")
- else
- return nil
- end
-end
-
-local data_dissector = DissectorTable.get("tcp.port")
-data_dissector:add(7114, zilla_protocol)
diff --git a/incubator/command-dump/src/main/resources/META-INF/zilla/pcap.idl b/incubator/command-dump/src/main/resources/META-INF/zilla/pcap.idl
index fdc017d8f2..7b3089f255 100644
--- a/incubator/command-dump/src/main/resources/META-INF/zilla/pcap.idl
+++ b/incubator/command-dump/src/main/resources/META-INF/zilla/pcap.idl
@@ -39,13 +39,20 @@ scope pcap
{
int32 prefix; /* Version + Traffic class + Flow label = 32 bit */
int16 payload_length;
- int16 next_header_and_hop_limit;
+ int8 next_header;
+ int8 hop_limit;
int64 src_addr_part1;
int64 src_addr_part2;
int64 dst_addr_part1;
int64 dst_addr_part2;
}
+ struct IPv6JumboHeader
+ {
+ int32 prefix; /* Next Header + Header Ext Length + Option Type + Option Data Length */
+ int32 payload_length;
+ }
+
struct TcpHeader
{
int16 src_port;
diff --git a/incubator/command-dump/src/main/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/zilla.lua b/incubator/command-dump/src/main/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/zilla.lua
new file mode 100644
index 0000000000..5e8aea7035
--- /dev/null
+++ b/incubator/command-dump/src/main/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/zilla.lua
@@ -0,0 +1,848 @@
+--[[
+
+ Copyright 2021-2023 Aklivity Inc
+
+ Licensed under the Aklivity Community License (the "License"); you may not use
+ this file except in compliance with the License. You may obtain a copy of the
+ License at
+
+ https://www.aklivity.io/aklivity-community-license/
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations under the License.
+
+]]
+
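+-- @version@ is substituted by Maven resource filtering at build time; unfiltered or snapshot builds fall back to "dev"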
+local zilla_version = "@version@"
+if zilla_version == string.format("@%s@", "version") or zilla_version == "develop-SNAPSHOT" then
+ zilla_version = "dev"
+end
+
+local zilla_info = {
+ version = zilla_version,
+ author = "Aklivity, Inc.",
+ repository = "https://github.com/aklivity/zilla",
+ description = "Dissector for the internal protocol of Zilla"
+}
+set_plugin_info(zilla_info)
+
+local zilla_protocol = Proto("Zilla", "Zilla Frames")
+
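+-- the zilla frame header is 16 bytes: frame type id, protocol type id, worker and offset; labels follow immediately after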
+HEADER_OFFSET = 0
+LABELS_OFFSET = 16
+
+BEGIN_ID = 0x00000001
+DATA_ID = 0x00000002
+END_ID = 0x00000003
+ABORT_ID = 0x00000004
+FLUSH_ID = 0x00000005
+RESET_ID = 0x40000001
+WINDOW_ID = 0x40000002
+SIGNAL_ID = 0x40000003
+CHALLENGE_ID = 0x40000004
+
+AMQP_ID = 0x112dc182
+GRPC_ID = 0xf9c7583a
+HTTP_ID = 0x8ab62046
+KAFKA_ID = 0x084b20e1
+MQTT_ID = 0xd0d41a76
+PROXY_ID = 0x8dcea850
+TLS_ID = 0x99f321bc
+
+local flags_types = {
+ [0] = "Not set",
+ [1] = "Set"
+}
+
+local proxy_ext_address_family_types = {
+ [0] = "INET",
+ [1] = "INET4",
+ [2] = "INET6",
+ [3] = "UNIX",
+ [4] = "NONE",
+}
+
+local proxy_ext_address_protocol_types = {
+ [0] = "STREAM",
+ [1] = "DATAGRAM",
+}
+
+local proxy_ext_info_types = {
+ [0x01] = "ALPN",
+ [0x02] = "AUTHORITY",
+ [0x05] = "IDENTITY",
+ [0x20] = "SECURE",
+ [0x30] = "NAMESPACE",
+}
+
+local proxy_ext_secure_info_types = {
+ [0x21] = "VERSION",
+ [0x22] = "NAME",
+ [0x23] = "CIPHER",
+ [0x24] = "SIGNATURE",
+ [0x25] = "KEY",
+}
+
+local grpc_types = {
+ [0] = "TEXT",
+ [1] = "BASE64"
+}
+
+local fields = {
+ -- header
+ frame_type_id = ProtoField.uint32("zilla.frame_type_id", "Frame Type ID", base.HEX),
+ frame_type = ProtoField.string("zilla.frame_type", "Frame Type", base.NONE),
+ protocol_type_id = ProtoField.uint32("zilla.protocol_type_id", "Protocol Type ID", base.HEX),
+ protocol_type = ProtoField.string("zilla.protocol_type", "Protocol Type", base.NONE),
+ stream_type_id = ProtoField.uint32("zilla.stream_type_id", "Stream Type ID", base.HEX),
+ stream_type = ProtoField.string("zilla.stream_type", "Stream Type", base.NONE),
+ worker = ProtoField.uint32("zilla.worker", "Worker", base.DEC),
+ offset = ProtoField.uint32("zilla.offset", "Offset", base.HEX),
+
+ -- labels
+ origin_namespace = ProtoField.string("zilla.origin_namespace", "Origin Namespace", base.STRING),
+ origin_binding = ProtoField.string("zilla.origin_binding", "Origin Binding", base.STRING),
+ routed_namespace = ProtoField.string("zilla.routed_namespace", "Routed Namespace", base.STRING),
+ routed_binding = ProtoField.string("zilla.routed_binding", "Routed Binding", base.STRING),
+
+ -- all frames
+ origin_id = ProtoField.uint64("zilla.origin_id", "Origin ID", base.HEX),
+ routed_id = ProtoField.uint64("zilla.routed_id", "Routed ID", base.HEX),
+ stream_id = ProtoField.uint64("zilla.stream_id", "Stream ID", base.HEX),
+ direction = ProtoField.string("zilla.direction", "Direction", base.NONE),
+ initial_id = ProtoField.uint64("zilla.initial_id", "Initial ID", base.HEX),
+ reply_id = ProtoField.uint64("zilla.reply_id", "Reply ID", base.HEX),
+ sequence = ProtoField.int64("zilla.sequence", "Sequence", base.DEC),
+ acknowledge = ProtoField.int64("zilla.acknowledge", "Acknowledge", base.DEC),
+ maximum = ProtoField.int32("zilla.maximum", "Maximum", base.DEC),
+ timestamp = ProtoField.uint64("zilla.timestamp", "Timestamp", base.HEX),
+ trace_id = ProtoField.uint64("zilla.trace_id", "Trace ID", base.HEX),
+ authorization = ProtoField.uint64("zilla.authorization", "Authorization", base.HEX),
+
+ -- begin frame
+ affinity = ProtoField.uint64("zilla.affinity", "Affinity", base.HEX),
+
+ -- data frame
+ flags = ProtoField.uint8("zilla.flags", "Flags", base.HEX),
+ flags_fin = ProtoField.uint8("zilla.flags_fin", "FIN", base.DEC, flags_types, 0x01),
+ flags_init = ProtoField.uint8("zilla.flags_init", "INIT", base.DEC, flags_types, 0x02),
+ flags_incomplete = ProtoField.uint8("zilla.flags_incomplete", "INCOMPLETE", base.DEC, flags_types, 0x04),
+ flags_skip = ProtoField.uint8("zilla.flags_skip", "SKIP", base.DEC, flags_types, 0x08),
+ budget_id = ProtoField.uint64("zilla.budget_id", "Budget ID", base.HEX),
+ reserved = ProtoField.int32("zilla.reserved", "Reserved", base.DEC),
+ payload_length = ProtoField.int32("zilla.payload_length", "Length", base.DEC),
+ progress = ProtoField.int64("zilla.progress", "Progress", base.DEC),
+ progress_maximum = ProtoField.string("zilla.progress_maximum", "Progress/Maximum", base.NONE),
+ payload = ProtoField.protocol("zilla.payload", "Payload", base.HEX),
+
+ -- window frame
+ padding = ProtoField.int32("zilla.padding", "Padding", base.DEC),
+ minimum = ProtoField.int32("zilla.minimum", "Minimum", base.DEC),
+ capabilities = ProtoField.uint8("zilla.capabilities", "Capabilities", base.HEX),
+
+ -- signal frame
+ cancel_id = ProtoField.uint64("zilla.cancel_id", "Cancel ID", base.HEX),
+ signal_id = ProtoField.int32("zilla.signal_id", "Signal ID", base.DEC),
+ context_id = ProtoField.int32("zilla.context_id", "Context ID", base.DEC),
+
+ -- proxy extension
+ -- address
+ proxy_ext_address_family = ProtoField.uint8("zilla.proxy_ext.address_family", "Family", base.DEC,
+ proxy_ext_address_family_types),
+ proxy_ext_address_protocol = ProtoField.uint8("zilla.proxy_ext.address_protocol", "Protocol", base.DEC,
+ proxy_ext_address_protocol_types),
+ proxy_ext_address_inet_source_port = ProtoField.uint16("zilla.proxy_ext.address_inet_source_port", "Source Port",
+ base.DEC),
+ proxy_ext_address_inet_destination_port = ProtoField.uint16("zilla.proxy_ext.address_inet_destination_port",
+ "Destination Port", base.DEC),
+ proxy_ext_address_inet_source = ProtoField.string("zilla.proxy_ext.address_inet_source", "Source", base.NONE),
+ proxy_ext_address_inet_destination = ProtoField.string("zilla.proxy_ext.address_inet_destination", "Destination",
+ base.NONE),
+ proxy_ext_address_inet4_source = ProtoField.new("Source", "zilla.proxy_ext.address_inet4_source", ftypes.IPv4),
+ proxy_ext_address_inet4_destination = ProtoField.new("Destination", "zilla.proxy_ext.address_inet4_destination",
+ ftypes.IPv4),
+ proxy_ext_address_inet6_source = ProtoField.new("Source", "zilla.proxy_ext.address_inet6_source", ftypes.IPv6),
+ proxy_ext_address_inet6_destination = ProtoField.new("Destination", "zilla.proxy_ext.address_inet6_destination",
+ ftypes.IPv6),
+ proxy_ext_address_unix_source = ProtoField.string("zilla.proxy_ext.address_unix_source", "Source", base.NONE),
+ proxy_ext_address_unix_destination = ProtoField.string("zilla.proxy_ext.address_unix_destination", "Destination",
+ base.NONE),
+ -- info
+ proxy_ext_info_array_length = ProtoField.uint8("zilla.proxy_ext.info_array_length", "Length", base.DEC),
+ proxy_ext_info_array_size = ProtoField.uint8("zilla.proxy_ext.info_array_size", "Size", base.DEC),
+ proxy_ext_info_type = ProtoField.uint8("zilla.proxy_ext.info_type", "Type", base.HEX, proxy_ext_info_types),
+ proxy_ext_info_length = ProtoField.uint16("zilla.proxy_ext.info_length", "Length", base.DEC),
+ proxy_ext_info_alpn = ProtoField.string("zilla.proxy_ext.info_alpn", "Value", base.NONE),
+ proxy_ext_info_authority = ProtoField.string("zilla.proxy_ext.info_authority", "Value", base.NONE),
+ proxy_ext_info_identity = ProtoField.bytes("zilla.proxy_ext.info_identity", "Value", base.NONE),
+ proxy_ext_info_namespace = ProtoField.string("zilla.proxy_ext.info_namespace", "Value", base.NONE),
+ proxy_ext_info_secure = ProtoField.string("zilla.proxy_ext.info_secure", "Value", base.NONE),
+ proxy_ext_info_secure_type = ProtoField.uint8("zilla.proxy_ext.info_secure_type", "Secure Type", base.HEX,
+ proxy_ext_secure_info_types),
+
+ -- http extension
+ -- headers
+ http_ext_headers_array_length = ProtoField.uint8("zilla.http_ext.headers_array_length", "Length", base.DEC),
+ http_ext_headers_array_size = ProtoField.uint8("zilla.http_ext.headers_array_size", "Size", base.DEC),
+ http_ext_header_name_length = ProtoField.uint8("zilla.http_ext.header_name_length", "Length", base.DEC),
+ http_ext_header_name = ProtoField.string("zilla.http_ext.header_name", "Name", base.NONE),
+ http_ext_header_value_length = ProtoField.uint16("zilla.http_ext.header_value_length", "Length", base.DEC),
+ http_ext_header_value = ProtoField.string("zilla.http_ext.header_value", "Value", base.NONE),
+ -- promise id
+ http_ext_promise_id = ProtoField.uint64("zilla.promise_id", "Promise ID", base.HEX),
+
+ -- grpc extension
+ grpc_ext_scheme_length = ProtoField.uint16("zilla.grpc_ext.scheme_length", "Length", base.DEC),
+ grpc_ext_scheme = ProtoField.string("zilla.grpc_ext.scheme", "Scheme", base.NONE),
+ grpc_ext_authority_length = ProtoField.uint16("zilla.grpc_ext.authority_length", "Length", base.DEC),
+ grpc_ext_authority = ProtoField.string("zilla.grpc_ext.authority", "Authority", base.NONE),
+ grpc_ext_service_length = ProtoField.uint16("zilla.grpc_ext.service_length", "Length", base.DEC),
+ grpc_ext_service = ProtoField.string("zilla.grpc_ext.service", "Service", base.NONE),
+ grpc_ext_method_length = ProtoField.uint16("zilla.grpc_ext.method_length", "Length", base.DEC),
+ grpc_ext_method = ProtoField.string("zilla.grpc_ext.method", "Method", base.NONE),
+ grpc_ext_deferred = ProtoField.uint32("zilla.grpc_ext.deferred", "Deferred", base.DEC),
+ grpc_ext_status_length = ProtoField.uint16("zilla.grpc_ext.status_length", "Length", base.DEC),
+ grpc_ext_status = ProtoField.string("zilla.grpc_ext.status", "Status", base.NONE),
+ -- metadata
+ grpc_ext_metadata_array_length = ProtoField.uint8("zilla.grpc_ext.metadata_array_length", "Length", base.DEC),
+ grpc_ext_metadata_array_size = ProtoField.uint8("zilla.grpc_ext.metadata_array_size", "Size", base.DEC),
+ grpc_ext_metadata_type = ProtoField.uint32("zilla.grpc_ext.metadata_type", "Type", base.DEC, grpc_types),
+    grpc_ext_metadata_name_length_varint = ProtoField.bytes("zilla.grpc_ext.metadata_name_length_varint", "Length (Varint)", base.NONE),
+ grpc_ext_metadata_name_length = ProtoField.uint32("zilla.grpc_ext.metadata_name_length", "Length", base.DEC),
+ grpc_ext_metadata_name = ProtoField.string("zilla.grpc_ext.metadata_name", "Name", base.NONE),
+ grpc_ext_metadata_value_length_varint = ProtoField.bytes("zilla.grpc_ext.metadata_value_length_varint", "Length (Varint)", base.NONE),
+ grpc_ext_metadata_value_length = ProtoField.uint32("zilla.grpc_ext.metadata_value_length", "Length", base.DEC),
+ grpc_ext_metadata_value = ProtoField.string("zilla.grpc_ext.metadata_value", "Value", base.NONE),
+}
+
+zilla_protocol.fields = fields;
+
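+-- each frame starts with a fixed header (frame type id, protocol type id, worker,
+-- offset), followed by a length-prefixed labels block and the common frame fields;
+-- type-specific fields and extensions begin at frame_offset + 72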
+function zilla_protocol.dissector(buffer, pinfo, tree)
+ if buffer:len() == 0 then return end
+ local subtree = tree:add(zilla_protocol, buffer(), "Zilla Frame")
+
+ -- header
+ local slice_frame_type_id = buffer(HEADER_OFFSET, 4)
+ local frame_type_id = slice_frame_type_id:le_uint()
+ local frame_type = resolve_frame_type(frame_type_id)
+ subtree:add_le(fields.frame_type_id, slice_frame_type_id)
+ subtree:add(fields.frame_type, frame_type)
+
+ local slice_protocol_type_id = buffer(HEADER_OFFSET + 4, 4)
+ local protocol_type_id = slice_protocol_type_id:le_uint()
+ local protocol_type = resolve_type(protocol_type_id)
+ subtree:add_le(fields.protocol_type_id, slice_protocol_type_id)
+ subtree:add(fields.protocol_type, protocol_type)
+
+ local slice_worker = buffer(HEADER_OFFSET + 8, 4)
+ local slice_offset = buffer(HEADER_OFFSET + 12, 4)
+ subtree:add_le(fields.worker, slice_worker)
+ subtree:add_le(fields.offset, slice_offset)
+
+ -- labels
+ local slice_labels_length = buffer(LABELS_OFFSET, 4)
+ local labels_length = slice_labels_length:le_uint()
+
+ -- origin id
+ local frame_offset = LABELS_OFFSET + labels_length
+ local slice_origin_id = buffer(frame_offset + 4, 8)
+ subtree:add_le(fields.origin_id, slice_origin_id)
+
+ local label_offset = LABELS_OFFSET + 4;
+ local origin_namespace_length = buffer(label_offset, 4):le_uint()
+ label_offset = label_offset + 4
+ local slice_origin_namespace = buffer(label_offset, origin_namespace_length)
+ label_offset = label_offset + origin_namespace_length
+ if (origin_namespace_length > 0) then
+ subtree:add(fields.origin_namespace, slice_origin_namespace)
+ end
+
+ local origin_binding_length = buffer(label_offset, 4):le_uint()
+ label_offset = label_offset + 4
+ local slice_origin_binding = buffer(label_offset, origin_binding_length)
+ label_offset = label_offset + origin_binding_length
+ if (origin_binding_length > 0) then
+ subtree:add(fields.origin_binding, slice_origin_binding)
+ end
+
+ -- routed id
+ local slice_routed_id = buffer(frame_offset + 12, 8)
+ subtree:add_le(fields.routed_id, slice_routed_id)
+
+ local routed_namespace_length = buffer(label_offset, 4):le_uint()
+ label_offset = label_offset + 4
+    local slice_routed_namespace = buffer(label_offset, routed_namespace_length)
+ label_offset = label_offset + routed_namespace_length
+ if (routed_namespace_length > 0) then
+ subtree:add(fields.routed_namespace, slice_routed_namespace)
+ end
+
+ local routed_binding_length = buffer(label_offset, 4):le_uint()
+ label_offset = label_offset + 4
+ local slice_routed_binding = buffer(label_offset, routed_binding_length)
+ label_offset = label_offset + routed_binding_length
+ if (routed_binding_length > 0) then
+ subtree:add(fields.routed_binding, slice_routed_binding)
+ end
+
+ -- stream id
+ local slice_stream_id = buffer(frame_offset + 20, 8)
+ local stream_id = slice_stream_id:le_uint64();
+ subtree:add_le(fields.stream_id, slice_stream_id)
+ local direction
+ local initial_id
+ local reply_id
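+    -- stream id 0 carries no direction (e.g. engine-level signals); otherwise odd
+    -- ids mark initial (INI) streams and even ids the paired replies (REP)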
+ if stream_id == UInt64(0) then
+ direction = ""
+ else
+ if (stream_id % 2) == UInt64(0) then
+ direction = "REP"
+ initial_id = stream_id + UInt64(1)
+ reply_id = stream_id
+ else
+ direction = "INI"
+ initial_id = stream_id
+ reply_id = stream_id - UInt64(1)
+ end
+ subtree:add(fields.initial_id, initial_id)
+ subtree:add(fields.reply_id, reply_id)
+ end
+ subtree:add(fields.direction, direction)
+
+ -- more frame properties
+ local slice_sequence = buffer(frame_offset + 28, 8)
+ local sequence = slice_sequence:le_int64();
+ local slice_acknowledge = buffer(frame_offset + 36, 8)
+ local acknowledge = slice_acknowledge:le_int64();
+ local slice_maximum = buffer(frame_offset + 44, 4)
+ local maximum = slice_maximum:le_int();
+ local slice_timestamp = buffer(frame_offset + 48, 8)
+ local slice_trace_id = buffer(frame_offset + 56, 8)
+ local slice_authorization = buffer(frame_offset + 64, 8)
+ subtree:add_le(fields.sequence, slice_sequence)
+ subtree:add_le(fields.acknowledge, slice_acknowledge)
+ subtree:add_le(fields.maximum, slice_maximum)
+ subtree:add_le(fields.timestamp, slice_timestamp)
+ subtree:add_le(fields.trace_id, slice_trace_id)
+ subtree:add_le(fields.authorization, slice_authorization)
+
+ pinfo.cols.protocol = zilla_protocol.name
+ local info = string.format("ZILLA %s %s", frame_type, direction)
+ if protocol_type and protocol_type ~= "" then
+ info = string.format("%s p=%s", info, protocol_type)
+ end
+ pinfo.cols.info:set(info)
+
+ -- begin
+ if frame_type_id == BEGIN_ID then
+ local slice_affinity = buffer(frame_offset + 72, 8)
+ subtree:add_le(fields.affinity, slice_affinity)
+ handle_extension(buffer, subtree, pinfo, info, frame_offset + 80, frame_type_id)
+ end
+
+ -- data
+ if frame_type_id == DATA_ID then
+ local slice_flags = buffer(frame_offset + 72, 1)
+ local flags_label = string.format("Flags: 0x%02x", slice_flags:le_uint())
+ local flags_subtree = subtree:add(zilla_protocol, slice_flags, flags_label)
+ flags_subtree:add_le(fields.flags_fin, slice_flags)
+ flags_subtree:add_le(fields.flags_init, slice_flags)
+ flags_subtree:add_le(fields.flags_incomplete, slice_flags)
+ flags_subtree:add_le(fields.flags_skip, slice_flags)
+
+ local slice_budget_id = buffer(frame_offset + 73, 8)
+ local slice_reserved = buffer(frame_offset + 81, 4)
+ local reserved = slice_reserved:le_int();
+ local progress = sequence - acknowledge + reserved;
+ local progress_maximum = string.format("%s/%s", progress, maximum)
+ subtree:add_le(fields.budget_id, slice_budget_id)
+ subtree:add_le(fields.reserved, slice_reserved)
+ subtree:add(fields.progress, progress)
+ subtree:add(fields.progress_maximum, progress_maximum)
+ pinfo.cols.info:set(string.format("%s [%s]", info, progress_maximum))
+
+ local slice_payload_length = buffer(frame_offset + 85, 4)
+ local payload_length = math.max(slice_payload_length:le_int(), 0)
+ local slice_payload = buffer(frame_offset + 89, payload_length)
+ local payload_subtree = subtree:add(zilla_protocol, slice_payload, "Payload")
+ payload_subtree:add_le(fields.payload_length, slice_payload_length)
+ if (payload_length > 0) then
+ payload_subtree:add(fields.payload, slice_payload)
+ end
+
+ handle_extension(buffer, subtree, pinfo, info, frame_offset + 89 + payload_length, frame_type_id)
+
+ local dissector = resolve_dissector(protocol_type, slice_payload:tvb())
+ if dissector then
+ dissector:call(slice_payload:tvb(), pinfo, tree)
+ end
+ end
+
+ -- end
+ if frame_type_id == END_ID then
+ handle_extension(buffer, subtree, pinfo, info, frame_offset + 72, frame_type_id)
+ end
+
+ -- abort
+ if frame_type_id == ABORT_ID then
+ handle_extension(buffer, subtree, pinfo, info, frame_offset + 72, frame_type_id)
+ end
+
+ -- flush
+ if frame_type_id == FLUSH_ID then
+ local slice_budget_id = buffer(frame_offset + 72, 8)
+ local slice_reserved = buffer(frame_offset + 80, 4)
+ subtree:add_le(fields.budget_id, slice_budget_id)
+ subtree:add_le(fields.reserved, slice_reserved)
+ handle_extension(buffer, subtree, pinfo, info, frame_offset + 84, frame_type_id)
+ end
+
+ -- reset
+ if frame_type_id == RESET_ID then
+ handle_extension(buffer, subtree, pinfo, info, frame_offset + 72, frame_type_id)
+ end
+
+ -- window
+ if frame_type_id == WINDOW_ID then
+ local slice_budget_id = buffer(frame_offset + 72, 8)
+ local slice_padding = buffer(frame_offset + 80, 4)
+ local slice_minimum = buffer(frame_offset + 84, 4)
+ local slice_capabilities = buffer(frame_offset + 88, 1)
+ subtree:add_le(fields.budget_id, slice_budget_id)
+ subtree:add_le(fields.padding, slice_padding)
+ subtree:add_le(fields.minimum, slice_minimum)
+ subtree:add_le(fields.capabilities, slice_capabilities)
+
+ local progress = sequence - acknowledge;
+ local progress_maximum = string.format("%s/%s", progress, maximum)
+ subtree:add(fields.progress, progress)
+ subtree:add(fields.progress_maximum, progress_maximum)
+
+ pinfo.cols.info:set(string.format("%s [%s]", info, progress_maximum))
+ end
+
+ -- signal
+ if frame_type_id == SIGNAL_ID then
+ local slice_cancel_id = buffer(frame_offset + 72, 8)
+ local slice_signal_id = buffer(frame_offset + 80, 4)
+ local slice_context_id = buffer(frame_offset + 84, 4)
+ subtree:add_le(fields.cancel_id, slice_cancel_id)
+ subtree:add_le(fields.signal_id, slice_signal_id)
+ subtree:add_le(fields.context_id, slice_context_id)
+
+ local slice_payload_length = buffer(frame_offset + 88, 4)
+ local payload_length = math.max(slice_payload_length:le_int(), 0)
+ local slice_payload = buffer(frame_offset + 92, payload_length)
+ local payload_subtree = subtree:add(zilla_protocol, slice_payload, "Payload")
+ payload_subtree:add_le(fields.payload_length, slice_payload_length)
+ if (payload_length > 0) then
+ payload_subtree:add(fields.payload, slice_payload)
+ end
+ end
+
+ -- challenge
+ if frame_type_id == CHALLENGE_ID then
+ handle_extension(buffer, subtree, pinfo, info, frame_offset + 72, frame_type_id)
+ end
+end
+
+function resolve_frame_type(frame_type_id)
+ local frame_type = ""
+ if frame_type_id == BEGIN_ID then frame_type = "BEGIN"
+ elseif frame_type_id == DATA_ID then frame_type = "DATA"
+ elseif frame_type_id == END_ID then frame_type = "END"
+ elseif frame_type_id == ABORT_ID then frame_type = "ABORT"
+ elseif frame_type_id == FLUSH_ID then frame_type = "FLUSH"
+ elseif frame_type_id == RESET_ID then frame_type = "RESET"
+ elseif frame_type_id == WINDOW_ID then frame_type = "WINDOW"
+ elseif frame_type_id == SIGNAL_ID then frame_type = "SIGNAL"
+ elseif frame_type_id == CHALLENGE_ID then frame_type = "CHALLENGE"
+ end
+ return frame_type
+end
+
+function resolve_type(type_id)
+ local type = ""
+ if type_id == AMQP_ID then type = "amqp"
+ elseif type_id == GRPC_ID then type = "grpc"
+ elseif type_id == HTTP_ID then type = "http"
+ elseif type_id == KAFKA_ID then type = "kafka"
+ elseif type_id == MQTT_ID then type = "mqtt"
+ elseif type_id == PROXY_ID then type = "proxy"
+ elseif type_id == TLS_ID then type = "tls"
+ end
+ return type
+end
+
+function resolve_dissector(protocol_type, payload)
+ local dissector
+ if protocol_type == "amqp" then dissector = Dissector.get("amqp")
+ elseif protocol_type == "http" then dissector = resolve_http_dissector(payload)
+ elseif protocol_type == "kafka" then dissector = Dissector.get("kafka")
+ elseif protocol_type == "mqtt" then dissector = Dissector.get("mqtt")
+ elseif protocol_type == "tls" then dissector = Dissector.get("tls")
+ end
+ return dissector
+end
+
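+-- heuristic: the payload is treated as http2 if it starts with the connection
+-- preface ("PRI") or if the first frame header is self-consistent (3-byte
+-- big-endian length plus the 9-byte frame header equals the payload length);
+-- otherwise http/1.x is detected by its request methods or status line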
+function resolve_http_dissector(payload)
+ if payload:range(0, 3):int() + 9 == payload:len() then
+ return Dissector.get("http2")
+ elseif payload:range(0, 3):string() == "PRI" then
+ return Dissector.get("http2")
+ elseif payload:range(0, 4):string() == "HTTP" then
+ return Dissector.get("http")
+ elseif payload:range(0, 3):string() == "GET" then
+ return Dissector.get("http")
+ elseif payload:range(0, 4):string() == "POST" then
+ return Dissector.get("http")
+ elseif payload:range(0, 3):string() == "PUT" then
+ return Dissector.get("http")
+ elseif payload:range(0, 6):string() == "DELETE" then
+ return Dissector.get("http")
+ elseif payload:range(0, 4):string() == "HEAD" then
+ return Dissector.get("http")
+ elseif payload:range(0, 7):string() == "OPTIONS" then
+ return Dissector.get("http")
+ elseif payload:range(0, 5):string() == "TRACE" then
+ return Dissector.get("http")
+ elseif payload:range(0, 7):string() == "CONNECT" then
+ return Dissector.get("http")
+ else
+ return nil
+ end
+end
+
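+-- extensions follow the fixed frame fields: the leading stream type id selects
+-- the binding-specific decoder (proxy, http or grpc)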
+function handle_extension(buffer, subtree, pinfo, info, offset, frame_type_id)
+ if buffer:len() > offset then
+ local slice_stream_type_id = buffer(offset, 4)
+ local stream_type_id = slice_stream_type_id:le_uint();
+ local stream_type = resolve_type(stream_type_id)
+ local extension_label = string.format("Extension: %s", stream_type)
+ local slice_extension = buffer(offset)
+ local extension_subtree = subtree:add(zilla_protocol, slice_extension, extension_label)
+ extension_subtree:add(fields.stream_type_id, slice_stream_type_id)
+ extension_subtree:add(fields.stream_type, stream_type)
+
+ if stream_type_id == PROXY_ID then
+ handle_proxy_extension(buffer, extension_subtree, offset + 4)
+ elseif stream_type_id == HTTP_ID then
+ handle_http_extension(buffer, extension_subtree, offset + 4, frame_type_id)
+ elseif stream_type_id == GRPC_ID then
+ handle_grpc_extension(buffer, extension_subtree, offset + 4, frame_type_id)
+ end
+
+ if stream_type and stream_type ~= "" then
+ pinfo.cols.info:set(string.format("%s s=%s", info, stream_type))
+ end
+ end
+end
+
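+-- proxy extension layout: an address block whose shape depends on the address
+-- family, followed by a typed info array (alpn, authority, identity, secure, namespace)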
+function handle_proxy_extension(buffer, extension_subtree, offset)
+ -- address
+ local slice_address_family = buffer(offset, 1)
+ local address_family_id = slice_address_family:le_int()
+ local address_family = proxy_ext_address_family_types[address_family_id]
+ local address_subtree_label = string.format("Address: %s", address_family)
+ local info_offset
+ if address_family == "INET" then
+ local slice_protocol = buffer(offset + 1, 1)
+ local source_length = buffer(offset + 2, 2):le_int()
+ local slice_source = buffer(offset + 4, source_length)
+ local destination_length = buffer(offset + 4 + source_length, 2):le_int()
+ local slice_destination = buffer(offset + 6 + source_length, destination_length)
+ local slice_source_port = buffer(offset + 6 + source_length + destination_length, 2)
+ local slice_destination_port = buffer(offset + 8 + source_length + destination_length, 2)
+ local length = 10 + source_length + destination_length
+ local address_subtree = extension_subtree:add(zilla_protocol, buffer(offset, length), address_subtree_label)
+ address_subtree:add(fields.proxy_ext_address_family, slice_address_family)
+ address_subtree:add(fields.proxy_ext_address_protocol, slice_protocol)
+ address_subtree:add(fields.proxy_ext_address_inet_source, slice_source)
+ address_subtree:add_le(fields.proxy_ext_address_inet_source_port, slice_source_port)
+ address_subtree:add(fields.proxy_ext_address_inet_destination, slice_destination)
+ address_subtree:add_le(fields.proxy_ext_address_inet_destination_port, slice_destination_port)
+ info_offset = offset + length
+ elseif address_family == "INET4" then
+ local slice_protocol = buffer(offset + 1, 1)
+ local slice_source = buffer(offset + 2, 4)
+ local slice_destination = buffer(offset + 6, 4)
+ local slice_source_port = buffer(offset + 10, 2)
+ local slice_destination_port = buffer(offset + 12, 2)
+ local length = 14;
+ local address_subtree = extension_subtree:add(zilla_protocol, buffer(offset, length), address_subtree_label)
+ address_subtree:add(fields.proxy_ext_address_family, slice_address_family)
+ address_subtree:add(fields.proxy_ext_address_protocol, slice_protocol)
+ address_subtree:add(fields.proxy_ext_address_inet4_source, slice_source)
+ address_subtree:add_le(fields.proxy_ext_address_inet_source_port, slice_source_port)
+ address_subtree:add(fields.proxy_ext_address_inet4_destination, slice_destination)
+ address_subtree:add_le(fields.proxy_ext_address_inet_destination_port, slice_destination_port)
+ info_offset = offset + length
+ elseif address_family == "INET6" then
+ local slice_protocol = buffer(offset + 1, 1)
+ local slice_source = buffer(offset + 2, 16)
+ local slice_destination = buffer(offset + 18, 16)
+ local slice_source_port = buffer(offset + 34, 2)
+ local slice_destination_port = buffer(offset + 36, 2)
+ local length = 38;
+ local address_subtree = extension_subtree:add(zilla_protocol, buffer(offset, length), address_subtree_label)
+ address_subtree:add(fields.proxy_ext_address_family, slice_address_family)
+ address_subtree:add(fields.proxy_ext_address_protocol, slice_protocol)
+ address_subtree:add(fields.proxy_ext_address_inet6_source, slice_source)
+ address_subtree:add_le(fields.proxy_ext_address_inet_source_port, slice_source_port)
+ address_subtree:add(fields.proxy_ext_address_inet6_destination, slice_destination)
+ address_subtree:add_le(fields.proxy_ext_address_inet_destination_port, slice_destination_port)
+ info_offset = offset + length;
+ elseif address_family == "UNIX" then
+ local slice_protocol = buffer(offset + 1, 1)
+ local slice_source = buffer(offset + 2, 108)
+ local slice_destination = buffer(offset + 110, 108)
+ local length = 218
+ local address_subtree = extension_subtree:add(zilla_protocol, buffer(offset, length), address_subtree_label)
+ address_subtree:add(fields.proxy_ext_address_family, slice_address_family)
+ address_subtree:add(fields.proxy_ext_address_protocol, slice_protocol)
+ address_subtree:add(fields.proxy_ext_address_unix_source, slice_source)
+ address_subtree:add(fields.proxy_ext_address_unix_destination, slice_destination)
+ info_offset = offset + length
+ elseif address_family == "NONE" then
+ local length = 1
+ local address_subtree = extension_subtree:add(zilla_protocol, buffer(offset, length), address_subtree_label)
+ address_subtree:add(fields.proxy_ext_address_family, slice_address_family)
+ info_offset = offset + length
+ end
+
+ -- info
+ local slice_info_array_length = buffer(info_offset, 4)
+ local slice_info_array_size = buffer(info_offset + 4, 4)
+ local info_array_length = slice_info_array_length:le_int()
+ local info_array_size = slice_info_array_size:le_int()
+ local length = 8
+ local label = string.format("Info (%d items)", info_array_size)
+ local info_array_subtree = extension_subtree:add(zilla_protocol, buffer(info_offset, length), label)
+ info_array_subtree:add_le(fields.proxy_ext_info_array_length, slice_info_array_length)
+ info_array_subtree:add_le(fields.proxy_ext_info_array_size, slice_info_array_size)
+ local item_offset = info_offset + length
+ for i = 1, info_array_size do
+ local slice_type_id = buffer(item_offset, 1)
+ local type_id = slice_type_id:le_int()
+ local type = proxy_ext_info_types[type_id]
+ local label_format = "Info: %s: %s"
+ item_offset = item_offset + 1
+ if type == "ALPN" then
+ local item_length, slice_length, slice_text = dissect_length_value(buffer, item_offset, 1)
+ add_proxy_string_as_subtree(buffer(item_offset - 1, item_length + 1), extension_subtree, label_format, slice_type_id,
+ slice_length, slice_text, fields.proxy_ext_info_type, fields.proxy_ext_info_length, fields.proxy_ext_info_alpn)
+ item_offset = item_offset + item_length
+ elseif type == "AUTHORITY" then
+ local item_length, slice_length, slice_text = dissect_length_value(buffer, item_offset, 2)
+ add_proxy_string_as_subtree(buffer(item_offset - 1, item_length + 1), extension_subtree, label_format, slice_type_id,
+ slice_length, slice_text, fields.proxy_ext_info_type, fields.proxy_ext_info_length, fields.proxy_ext_info_authority)
+ item_offset = item_offset + item_length
+ elseif type == "IDENTITY" then
+ local item_length, slice_length, slice_bytes = dissect_length_value(buffer, item_offset, 2)
+ local label = string.format("Info: %s: 0x%s", type, slice_bytes:bytes())
+ local subtree = extension_subtree:add(zilla_protocol, buffer(item_offset - 1, item_length + 1), label)
+ subtree:add(fields.proxy_ext_info_type, slice_type_id)
+ subtree:add_le(fields.proxy_ext_info_length, slice_length)
+ subtree:add(fields.proxy_ext_info_identity, slice_bytes)
+ item_offset = item_offset + item_length
+ elseif type == "SECURE" then
+ local slice_secure_type_id = buffer(item_offset, 1)
+ local secure_type_id = slice_secure_type_id:le_int();
+ local secure_type = proxy_ext_secure_info_types[secure_type_id]
+ item_offset = item_offset + 1
+ local length_length
+ if secure_type == "VERSION" or secure_type == "CIPHER" or secure_type == "SIGNATURE" or secure_type == "KEY" then
+ length_length = 1
+ elseif secure_type == "NAME" then
+ length_length = 2
+ end
+ local item_length, slice_length, slice_text = dissect_length_value(buffer, item_offset, length_length)
+ local label = string.format("Info: %s: %s: %s", type, secure_type, slice_text:string())
+ local subtree = extension_subtree:add(zilla_protocol, buffer(item_offset - 1, item_length + 1), label)
+ subtree:add(fields.proxy_ext_info_type, slice_type_id)
+ subtree:add(fields.proxy_ext_info_secure_type, slice_secure_type_id)
+ subtree:add_le(fields.proxy_ext_info_length, slice_length)
+ subtree:add(fields.proxy_ext_info_secure, slice_text)
+ item_offset = item_offset + item_length
+ elseif type == "NAMESPACE" then
+ local item_length, slice_length, slice_text = dissect_length_value(buffer, item_offset, 2)
+ add_proxy_string_as_subtree(buffer(item_offset - 1, item_length + 1), extension_subtree, label_format, slice_type_id,
+ slice_length, slice_text, fields.proxy_ext_info_type, fields.proxy_ext_info_length, fields.proxy_ext_info_namespace)
+ item_offset = item_offset + item_length
+ end
+ end
+end
+
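+-- reads a little-endian length field of length_length bytes followed by that
+-- many bytes of value; returns the combined item length and both slices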
+function dissect_length_value(buffer, item_offset, length_length)
+ local slice_length = buffer(item_offset, length_length)
+ local length = slice_length:le_int()
+ local slice_value = buffer(item_offset + length_length, length)
+ local item_length = length + length_length
+ return item_length, slice_length, slice_value
+end
+
+function add_proxy_string_as_subtree(buffer, tree, label_format, slice_type_id, slice_length, slice_text, field_type, field_length,
+ field_text)
+ local type_id = slice_type_id:le_int()
+ local type = proxy_ext_info_types[type_id]
+ local text = slice_text:string()
+ local label = string.format(label_format, type, text)
+ local subtree = tree:add(zilla_protocol, buffer, label)
+ subtree:add(field_type, slice_type_id)
+ subtree:add_le(field_length, slice_length)
+ subtree:add(field_text, slice_text)
+end
+
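+-- http extension: header lists on BEGIN, RESET and CHALLENGE, trailers on END,
+-- and a promise id plus promised headers on FLUSH (server push)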
+function handle_http_extension(buffer, extension_subtree, offset, frame_type_id)
+ if frame_type_id == BEGIN_ID or frame_type_id == RESET_ID or frame_type_id == CHALLENGE_ID then
+ dissect_and_add_http_headers(buffer, extension_subtree, offset, "Headers", "Header")
+ elseif frame_type_id == END_ID then
+ dissect_and_add_http_headers(buffer, extension_subtree, offset, "Trailers", "Trailer")
+ elseif frame_type_id == FLUSH_ID then
+        local slice_promise_id = buffer(offset, 8)
+ extension_subtree:add_le(fields.http_ext_promise_id, slice_promise_id)
+ dissect_and_add_http_headers(buffer, extension_subtree, offset + 8, "Promises", "Promise")
+ end
+end
+
+function dissect_and_add_http_headers(buffer, extension_subtree, offset, plural_name, singular_name)
+ local slice_headers_array_length = buffer(offset, 4)
+ local slice_headers_array_size = buffer(offset + 4, 4)
+ local headers_array_length = slice_headers_array_length:le_int()
+ local headers_array_size = slice_headers_array_size:le_int()
+ local length = 8
+ local label = string.format("%s (%d items)", plural_name, headers_array_size)
+ local headers_array_subtree = extension_subtree:add(zilla_protocol, buffer(offset, length), label)
+ headers_array_subtree:add_le(fields.http_ext_headers_array_length, slice_headers_array_length)
+ headers_array_subtree:add_le(fields.http_ext_headers_array_size, slice_headers_array_size)
+ local item_offset = offset + length
+ for i = 1, headers_array_size do
+ local name_length, slice_name_length, slice_name = dissect_length_value(buffer, item_offset, 1)
+ local value_offset = item_offset + name_length
+ local value_length, slice_value_length, slice_value = dissect_length_value(buffer, value_offset, 2)
+ local label = string.format("%s: %s: %s", singular_name, slice_name:string(), slice_value:string())
+ local subtree = extension_subtree:add(zilla_protocol, buffer(item_offset, name_length + value_length), label)
+ subtree:add_le(fields.http_ext_header_name_length, slice_name_length)
+ subtree:add(fields.http_ext_header_name, slice_name)
+ subtree:add_le(fields.http_ext_header_value_length, slice_value_length)
+ subtree:add(fields.http_ext_header_value, slice_value)
+ item_offset = item_offset + name_length + value_length
+ end
+end
+
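+-- grpc BEGIN extension: length-prefixed scheme, authority, service and method,
+-- then a metadata array; DATA carries a deferred byte count, ABORT and RESET a status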
+function handle_grpc_extension(buffer, extension_subtree, offset, frame_type_id)
+ if frame_type_id == BEGIN_ID then
+ -- scheme
+ local scheme_offset = offset
+ local scheme_length, slice_scheme_length, slice_scheme_text = dissect_length_value(buffer, scheme_offset, 2)
+ add_simple_string_as_subtree(buffer(scheme_offset, scheme_length), extension_subtree, "Scheme: %s", slice_scheme_length,
+ slice_scheme_text, fields.grpc_ext_scheme_length, fields.grpc_ext_scheme)
+ -- authority
+ local authority_offset = scheme_offset + scheme_length
+ local authority_length, slice_authority_length, slice_authority_text = dissect_length_value(buffer, authority_offset, 2)
+ add_simple_string_as_subtree(buffer(authority_offset, authority_length), extension_subtree, "Authority: %s", slice_authority_length,
+ slice_authority_text, fields.grpc_ext_authority_length, fields.grpc_ext_authority)
+ -- service
+ local service_offset = authority_offset + authority_length
+ local service_length, slice_service_length, slice_service_text = dissect_length_value(buffer, service_offset, 2)
+ add_simple_string_as_subtree(buffer(service_offset, service_length), extension_subtree, "Service: %s", slice_service_length,
+ slice_service_text, fields.grpc_ext_service_length, fields.grpc_ext_service)
+ -- method
+ local method_offset = service_offset + service_length
+ local method_length, slice_method_length, slice_method_text = dissect_length_value(buffer, method_offset, 2)
+ add_simple_string_as_subtree(buffer(method_offset, method_length), extension_subtree, "Method: %s", slice_method_length,
+ slice_method_text, fields.grpc_ext_method_length, fields.grpc_ext_method)
+ -- metadata array
+ local metadata_array_offset = method_offset + method_length
+ local slice_metadata_array_length = buffer(metadata_array_offset, 4)
+ local slice_metadata_array_size = buffer(metadata_array_offset + 4, 4)
+ local metadata_array_length = slice_metadata_array_length:le_int()
+ local metadata_array_size = slice_metadata_array_size:le_int()
+ local length = 8
+ local label = string.format("Metadata (%d items)", metadata_array_size)
+ local metadata_array_subtree = extension_subtree:add(zilla_protocol, buffer(metadata_array_offset, length), label)
+ metadata_array_subtree:add_le(fields.grpc_ext_metadata_array_length, slice_metadata_array_length)
+ metadata_array_subtree:add_le(fields.grpc_ext_metadata_array_size, slice_metadata_array_size)
+ local item_offset = metadata_array_offset + length
+ for i = 1, metadata_array_size do
+ local record_length = dissect_and_add_grpc_metadata(buffer, extension_subtree, item_offset)
+ item_offset = item_offset + record_length
+ end
+ elseif frame_type_id == DATA_ID then
+ local slice_deferred = buffer(offset, 4)
+ extension_subtree:add_le(fields.grpc_ext_deferred, slice_deferred)
+ elseif frame_type_id == ABORT_ID or frame_type_id == RESET_ID then
+ local status_length, slice_status_length, slice_status_text = dissect_length_value(buffer, offset, 2)
+ add_simple_string_as_subtree(buffer(offset, status_length), extension_subtree, "Status: %s", slice_status_length,
+ slice_status_text, fields.grpc_ext_status_length, fields.grpc_ext_status)
+ end
+end
+
+function add_simple_string_as_subtree(buffer, tree, label_format, slice_length, slice_text, field_length, field_text)
+ local text = slice_text:string()
+ local label = string.format(label_format, text)
+ local subtree = tree:add(zilla_protocol, buffer, label)
+ subtree:add_le(field_length, slice_length)
+ subtree:add(field_text, slice_text)
+end
+
+function dissect_and_add_grpc_metadata(buffer, extension_subtree, metadata_offset)
+ local offset = metadata_offset
+ -- type
+ local slice_type_id = buffer(offset, 1)
+ local type = grpc_types[slice_type_id:le_int()]
+ offset = offset + 1
+ -- name_length
+ local name_length, slice_name_length_varint, length_name_length = decode_varint32(buffer, offset)
+ offset = offset + length_name_length
+ -- name
+ local slice_name = buffer(offset, name_length)
+ local name = slice_name:string()
+ offset = offset + name_length
+ -- value_length
+ local value_length, slice_value_length_varint, length_value_length = decode_varint32(buffer, offset)
+ offset = offset + length_value_length
+ -- value
+ local slice_value = buffer(offset, value_length)
+ local value = slice_value:string()
+ -- add subtree
+ local record_length = 1 + length_name_length + name_length + length_value_length + value_length
+ local label = string.format("Metadata: [%s] %s: %s", type, name, value)
+ local metadata_subtree = extension_subtree:add(zilla_protocol, buffer(metadata_offset, record_length), label)
+ metadata_subtree:add(fields.grpc_ext_metadata_type, slice_type_id)
+ metadata_subtree:add(fields.grpc_ext_metadata_name_length_varint, slice_name_length_varint)
+ metadata_subtree:add(fields.grpc_ext_metadata_name_length, name_length)
+ metadata_subtree:add(fields.grpc_ext_metadata_name, slice_name)
+ metadata_subtree:add(fields.grpc_ext_metadata_value_length_varint, slice_value_length_varint)
+ metadata_subtree:add(fields.grpc_ext_metadata_value_length, value_length)
+ metadata_subtree:add(fields.grpc_ext_metadata_value, slice_value)
+ return record_length
+end
+
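+-- decodes a protobuf-style varint (7 data bits per byte, 0x80 continuation bit),
+-- then reverses the zig-zag mapping used by zilla's varint32 encoding; returns
+-- the decoded value, the consumed slice and its length in bytes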
+function decode_varint32(buffer, offset)
+ local value = 0
+ local i = 0
+ local pos = offset
+ local b = buffer(pos, 1):le_int()
+
+ while bit.band(b, 0x80) ~= 0 do
+ value = bit.bor(value, bit.lshift(bit.band(b, 0x7F), i))
+ i = i + 7
+ if i > 35 then
+ error("varint32 value too long")
+ end
+ pos = pos + 1
+ b = buffer(pos, 1):le_int()
+ end
+
+ local unsigned = bit.bor(value, bit.lshift(b, i))
+ local result = bit.rshift(bit.bxor(bit.rshift(bit.lshift(unsigned, 31), 31), unsigned), 1)
+ result = bit.bxor(result, bit.band(unsigned, bit.lshift(1, 31)))
+ local length = pos - offset + 1
+ return result, buffer(offset, length), length
+end
+
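+-- pcap files written by the dump command tag zilla frames with TCP port 7114,
+-- so registering on that port lets wireshark apply this dissector automatically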
+local data_dissector = DissectorTable.get("tcp.port")
+data_dissector:add(7114, zilla_protocol)
diff --git a/incubator/command-dump/src/test/java/io/aklivity/zilla/runtime/command/dump/internal/airline/WiresharkIT.java b/incubator/command-dump/src/test/java/io/aklivity/zilla/runtime/command/dump/internal/airline/WiresharkIT.java
new file mode 100644
index 0000000000..2f1850edf5
--- /dev/null
+++ b/incubator/command-dump/src/test/java/io/aklivity/zilla/runtime/command/dump/internal/airline/WiresharkIT.java
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc
+ *
+ * Licensed under the Aklivity Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * https://www.aklivity.io/aklivity-community-license/
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.aklivity.zilla.runtime.command.dump.internal.airline;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.testcontainers.containers.Container;
+import org.testcontainers.containers.ContainerFetchException;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.wait.strategy.Wait;
+import org.testcontainers.containers.wait.strategy.WaitStrategy;
+import org.testcontainers.images.builder.ImageFromDockerfile;
+import org.testcontainers.images.builder.Transferable;
+import org.testcontainers.utility.DockerImageName;
+
+@TestInstance(PER_CLASS)
+public class WiresharkIT
+{
+ private static final String TSHARK_DOCKER_IMAGE = "kreinerattila/tshark:4.2.0";
+ private static final String COMMAND = "sleep infinity";
+ private static final WaitStrategy WAIT_STRATEGY = Wait.forSuccessfulCommand("echo 42");
+
+    private GenericContainer<?> tshark;
+
+ @BeforeAll
+ public void setUp() throws IOException
+ {
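+        // pull the prebuilt tshark image; if unavailable, build it locally from the bundled Dockerfile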
+ try
+ {
+ System.out.printf("Starting the container using image %s...%n", TSHARK_DOCKER_IMAGE);
+ DockerImageName image = DockerImageName.parse(TSHARK_DOCKER_IMAGE);
+ tshark = new GenericContainer<>(image)
+ .withCommand(COMMAND)
+ .waitingFor(WAIT_STRATEGY);
+ tshark.start();
+ }
+ catch (ContainerFetchException ex)
+ {
+ System.out.printf("Image %s was not found, building it now...%n", TSHARK_DOCKER_IMAGE);
+ ImageFromDockerfile image = new ImageFromDockerfile().withDockerfile(resourceToPath("Dockerfile"));
+ tshark = new GenericContainer<>(image)
+ .withCommand(COMMAND)
+ .waitingFor(WAIT_STRATEGY);
+ tshark.start();
+ }
+ assert tshark.isRunning();
+ System.out.printf("Container %s (%s) is running!%n", tshark.getContainerName(), tshark.getContainerId());
+ copyResource("zilla.lua", tshark, "/home/tshark/.local/lib/wireshark/plugins/zilla.lua");
+ }
+
+ @AfterAll
+ public void close()
+ {
+ tshark.close();
+ }
+
+ @Test
+ public void shouldMatchExpectedOutput() throws Exception
+ {
+ // GIVEN
+ String pcapFileName = "expected_dump.pcap";
+ String containerPath = String.format("/opt/%s", pcapFileName);
+ copyResource(pcapFileName, tshark, containerPath);
+ String expectedText = Files.readString(resourceToPath("expected_dump.txt"));
+
+ // WHEN
+ String protocols = "zilla,http,http2";
+ Container.ExecResult result = tshark.execInContainer("tshark", "-O", protocols, "-r", containerPath);
+
+ // THEN
+ assertThat(result.getExitCode(), equalTo(0));
+ assertThat(result.getStdout(), equalTo(expectedText));
+ }
+
+ @Test
+ public void shouldMatchExpectedFilteredOutput() throws Exception
+ {
+ // GIVEN
+ String pcapFileName = "expected_filtered_dump.pcap";
+ String containerPath = String.format("/opt/%s", pcapFileName);
+ copyResource(pcapFileName, tshark, containerPath);
+ String expectedText = Files.readString(resourceToPath("expected_filtered_dump.txt"));
+
+ // WHEN
+ Container.ExecResult result = tshark.execInContainer("tshark", "-O", "zilla", "-r", containerPath);
+
+ // THEN
+ assertThat(result.getExitCode(), equalTo(0));
+ assertThat(result.getStdout(), equalTo(expectedText));
+ }
+
+ private static Path resourceToPath(
+ String name)
+ {
+ URL resource = WiresharkIT.class.getResource(name);
+ assert resource != null;
+ return Path.of(URI.create(resource.toString()));
+ }
+
+ private static void copyResource(
+ String resourceName,
+        GenericContainer<?> container,
+ String containerPath) throws IOException
+ {
+ assert container.isRunning();
+ try (InputStream is = WiresharkIT.class.getResourceAsStream(resourceName))
+ {
+ assert is != null;
+ container.copyFileToContainer(Transferable.of(is.readAllBytes()), containerPath);
+ }
+ }
+}
diff --git a/incubator/command-dump/src/test/java/io/aklivity/zilla/runtime/command/dump/internal/airline/ZillaDumpCommandTest.java b/incubator/command-dump/src/test/java/io/aklivity/zilla/runtime/command/dump/internal/airline/ZillaDumpCommandTest.java
index 1cd8bb8831..6d3b4e22b2 100644
--- a/incubator/command-dump/src/test/java/io/aklivity/zilla/runtime/command/dump/internal/airline/ZillaDumpCommandTest.java
+++ b/incubator/command-dump/src/test/java/io/aklivity/zilla/runtime/command/dump/internal/airline/ZillaDumpCommandTest.java
@@ -15,34 +15,65 @@
package io.aklivity.zilla.runtime.command.dump.internal.airline;
import static java.util.Collections.singletonList;
-import static org.junit.jupiter.api.Assertions.assertArrayEquals;
-import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS;
import java.io.File;
import java.io.IOException;
+import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
+import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
+import org.agrona.BitUtil;
+import org.agrona.DirectBuffer;
import org.agrona.MutableDirectBuffer;
import org.agrona.concurrent.UnsafeBuffer;
import org.agrona.concurrent.ringbuffer.RingBuffer;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.io.TempDir;
-import io.aklivity.zilla.runtime.command.dump.internal.types.OctetsFW;
+import io.aklivity.zilla.runtime.command.dump.internal.types.String8FW;
+import io.aklivity.zilla.runtime.command.dump.internal.types.stream.AbortFW;
+import io.aklivity.zilla.runtime.command.dump.internal.types.stream.ChallengeFW;
import io.aklivity.zilla.runtime.command.dump.internal.types.stream.DataFW;
import io.aklivity.zilla.runtime.command.dump.internal.types.stream.EndFW;
+import io.aklivity.zilla.runtime.command.dump.internal.types.stream.FlushFW;
+import io.aklivity.zilla.runtime.command.dump.internal.types.stream.ResetFW;
+import io.aklivity.zilla.runtime.command.dump.internal.types.stream.SignalFW;
import io.aklivity.zilla.runtime.engine.internal.layouts.StreamsLayout;
+import io.aklivity.zilla.specs.binding.grpc.internal.GrpcFunctions;
+import io.aklivity.zilla.specs.binding.http.internal.HttpFunctions;
+import io.aklivity.zilla.specs.binding.proxy.internal.ProxyFunctions;
import io.aklivity.zilla.specs.engine.internal.types.stream.BeginFW;
import io.aklivity.zilla.specs.engine.internal.types.stream.WindowFW;
+@TestInstance(PER_CLASS)
public class ZillaDumpCommandTest
{
- private static String baseDir = "src/test/resources/io/aklivity/zilla/runtime/command/dump/internal";
+ private static final int WORKERS = 3;
+ private static final int STREAMS_CAPACITY = 8 * 1024;
+ private static final Path ENGINE_PATH =
+ Path.of("src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine");
+ private static final int GRPC_TYPE_ID = 2;
+ private static final int HTTP_TYPE_ID = 3;
+ private static final int PROXY_TYPE_ID = 5;
+
+ private final BeginFW.Builder beginRW = new BeginFW.Builder();
+ private final DataFW.Builder dataRW = new DataFW.Builder();
+ private final EndFW.Builder endRW = new EndFW.Builder();
+ private final AbortFW.Builder abortRW = new AbortFW.Builder();
+ private final FlushFW.Builder flushRW = new FlushFW.Builder();
+ private final ResetFW.Builder resetRW = new ResetFW.Builder();
+ private final WindowFW.Builder windowRW = new WindowFW.Builder();
+ private final SignalFW.Builder signalRW = new SignalFW.Builder();
+ private final ChallengeFW.Builder challengeRW = new ChallengeFW.Builder();
@TempDir
private File tempDir;
@@ -50,81 +81,124 @@ public class ZillaDumpCommandTest
private ZillaDumpCommand command;
@BeforeAll
- public static void generateStreamsBuffer()
+ @SuppressWarnings("checkstyle:methodlength")
+ public void generateStreamsBuffer() throws Exception
{
- StreamsLayout streamsLayout = new StreamsLayout.Builder()
- .path(Paths.get(baseDir, "engine").resolve("data0"))
- .streamsCapacity(8 * 1024)
- .readonly(false)
- .build();
-
- RingBuffer streams = streamsLayout.streamsBuffer();
-
- MutableDirectBuffer frameBuffer = new UnsafeBuffer(new byte[1024 * 8]);
-
- BeginFW begin = new BeginFW.Builder().wrap(frameBuffer, 0, frameBuffer.capacity())
+ RingBuffer[] streams = new RingBuffer[WORKERS];
+ for (int i = 0; i < WORKERS; i++)
+ {
+ StreamsLayout streamsLayout = new StreamsLayout.Builder()
+ .path(ENGINE_PATH.resolve(String.format("data%d", i)))
+ .streamsCapacity(STREAMS_CAPACITY)
+ .readonly(false)
+ .build();
+ streams[i] = streamsLayout.streamsBuffer();
+ }
+ MutableDirectBuffer frameBuffer = new UnsafeBuffer(new byte[STREAMS_CAPACITY]);
+
+ // worker 0
+ SignalFW signal1 = signalRW.wrap(frameBuffer, 0, frameBuffer.capacity())
.originId(0)
.routedId(0)
.streamId(0)
.sequence(0)
.acknowledge(0)
.maximum(0)
- .affinity(0)
+ .timestamp(0x0000000000000001L)
+ .traceId(0x0000000000000001L)
+ .cancelId(0x0000000000007701L)
+ .signalId(0x00007702)
+ .contextId(0x00007703)
.build();
+ streams[0].write(SignalFW.TYPE_ID, signal1.buffer(), 0, signal1.sizeof());
- streams.write(BeginFW.TYPE_ID, begin.buffer(), 0, begin.sizeof());
-
- BeginFW begin2 = new BeginFW.Builder().wrap(frameBuffer, 0, frameBuffer.capacity())
+ DirectBuffer helloBuf = new String8FW("Hello World!").value();
+ SignalFW signal2 = signalRW.wrap(frameBuffer, 0, frameBuffer.capacity())
.originId(0)
- .routedId(1)
- .streamId(1)
- .sequence(1)
+ .routedId(0)
+ .streamId(0)
+ .sequence(0)
.acknowledge(0)
.maximum(0)
- .affinity(0)
+ .timestamp(0x0000000000000002L)
+ .traceId(0x0000000000000000L)
+ .cancelId(0x0000000000007801L)
+ .signalId(0x00007802)
+ .contextId(0x00007803)
+ .payload(helloBuf, 0, helloBuf.capacity())
.build();
+ streams[0].write(SignalFW.TYPE_ID, signal2.buffer(), 0, signal2.sizeof());
- streams.write(BeginFW.TYPE_ID, begin2.buffer(), 0, begin2.sizeof());
-
- BeginFW filteredBegin = new BeginFW.Builder().wrap(frameBuffer, 0, frameBuffer.capacity())
- .originId(0)
- .routedId(4294967298L)
- .streamId(4)
- .sequence(4)
+ BeginFW begin1 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000005L) // INI
+ .sequence(0)
.acknowledge(0)
.maximum(0)
- .affinity(0)
+ .timestamp(0x0000000000000003L)
+ .traceId(0x0000000000000003L)
+ .affinity(0x0000000000000005L)
.build();
+ streams[0].write(BeginFW.TYPE_ID, begin1.buffer(), 0, begin1.sizeof());
- streams.write(BeginFW.TYPE_ID, filteredBegin.buffer(), 0, filteredBegin.sizeof());
-
- WindowFW window1 = new WindowFW.Builder().wrap(frameBuffer, 0, frameBuffer.capacity())
- .originId(0)
- .routedId(0)
- .streamId(0)
+ WindowFW window1 = windowRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000005L) // INI
.sequence(0)
.acknowledge(0)
- .maximum(0)
+ .maximum(65536)
+ .timestamp(0x0000000000000004L)
+ .traceId(0x0000000000000003L)
.budgetId(0)
.padding(0)
.build();
+ streams[0].write(WindowFW.TYPE_ID, window1.buffer(), 0, window1.sizeof());
- streams.write(WindowFW.TYPE_ID, window1.buffer(), 0, window1.sizeof());
-
- WindowFW window2 = new WindowFW.Builder().wrap(frameBuffer, 0, frameBuffer.capacity())
- .originId(0)
- .routedId(1)
- .streamId(1)
+ BeginFW begin2 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000004L) // REP
.sequence(1)
.acknowledge(0)
.maximum(0)
+ .timestamp(0x0000000000000005L)
+ .traceId(0x0000000000000003L)
+ .affinity(0)
+ .build();
+ streams[0].write(BeginFW.TYPE_ID, begin2.buffer(), 0, begin2.sizeof());
+
+ WindowFW window2 = windowRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000004L) // REP
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(65536)
+ .timestamp(0x0000000000000006L)
+ .traceId(0x0000000000000003L)
.budgetId(0)
.padding(0)
.build();
+ streams[0].write(WindowFW.TYPE_ID, window2.buffer(), 0, window2.sizeof());
+
+ BeginFW filteredBegin = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000cL) // north_tls_server
+ .streamId(0x0000000000000077L) // INI
+ .sequence(71)
+ .acknowledge(72)
+ .maximum(73)
+ .timestamp(0x0000000000000007L)
+ .traceId(0x0000000000004202L)
+ .authorization(0x0000000000004203L)
+ .affinity(0x0000000000004204L)
+ .build();
+ streams[0].write(BeginFW.TYPE_ID, filteredBegin.buffer(), 0, filteredBegin.sizeof());
- streams.write(WindowFW.TYPE_ID, window2.buffer(), 0, window2.sizeof());
-
- String payload = "POST / HTTP/1.1\n" +
+ String http1request =
+ "POST / HTTP/1.1\n" +
"Host: localhost:8080\n" +
"User-Agent: curl/7.85.0\n" +
"Accept: */*\n" +
@@ -132,60 +206,620 @@ public static void generateStreamsBuffer()
"Content-Length: 12\n" +
"\n" +
"Hello, world";
+ DirectBuffer http1requestBuf = new String8FW(http1request).value();
+ DataFW data1 = dataRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000005L) // INI
+ .sequence(123)
+ .acknowledge(456)
+ .maximum(777)
+ .timestamp(0x0000000000000008L)
+ .traceId(0x0000000000000003L)
+ .budgetId(0x0000000000004205L)
+ .reserved(0x00004206)
+ .payload(http1requestBuf, 0, http1requestBuf.capacity())
+ .build();
+ streams[0].write(DataFW.TYPE_ID, data1.buffer(), 0, data1.sizeof());
- byte[] payloadBytes = payload.getBytes(StandardCharsets.UTF_8);
+ String http1response =
+ "HTTP/1.1 200 OK\n" +
+ "Content-Type: text/plain\n" +
+ "Content-Length: 13\n" +
+ "\n" +
+ "Hello, World!";
+ DirectBuffer http1responseBuf = new String8FW(http1response).value();
+ DataFW data2 = dataRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000004L) // REP
+ .sequence(123)
+ .acknowledge(456)
+ .maximum(777)
+ .timestamp(0x0000000000000009L)
+ .traceId(0x0000000000000003L)
+ .budgetId(0x0000000000004205L)
+ .reserved(0x00004206)
+ .payload(http1responseBuf, 0, http1responseBuf.capacity())
+ .build();
+ streams[0].write(DataFW.TYPE_ID, data2.buffer(), 0, data2.sizeof());
+
+ ChallengeFW challenge1 = challengeRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000004L) // REP
+ .sequence(201)
+ .acknowledge(202)
+ .maximum(22222)
+ .timestamp(0x000000000000000aL)
+ .traceId(0x0000000000000003L)
+ .authorization(0x0000000000007742L)
+ .build();
+ streams[0].write(ChallengeFW.TYPE_ID, challenge1.buffer(), 0, challenge1.sizeof());
+
+ // POST https://localhost:7142/
+ byte[] h2request = BitUtil.fromHex(
+ "00002c0104000000018387418aa0e41d139d09b8e85a67847a8825b650c3cb85717f53032a2f2a5f87497ca58ae819aa0f0d023132");
+ DirectBuffer h2requestBuf = new UnsafeBuffer(h2request);
+ DataFW data3 = dataRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000005L) // INI
+ .sequence(123)
+ .acknowledge(456)
+ .maximum(777)
+ .timestamp(0x000000000000000bL)
+ .traceId(0x0000000000000003L)
+ .budgetId(0x0000000000004405L)
+ .reserved(0x00004206)
+ .payload(h2requestBuf, 0, h2requestBuf.capacity())
+ .build();
+ streams[0].write(DataFW.TYPE_ID, data3.buffer(), 0, data3.sizeof());
+
+ // 200 OK
+ byte[] h2response = BitUtil.fromHex(
+ "000026010400000001880f2b0a6375726c2f382e312e320f04032a2f2a0f100a746578742f706c61696e0f0d023132");
+ DirectBuffer h2responseBuf = new UnsafeBuffer(h2response);
+ DataFW data4 = dataRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000004L) // REP
+ .sequence(123)
+ .acknowledge(456)
+ .maximum(777)
+ .timestamp(0x000000000000000cL)
+ .traceId(0x0000000000000003L)
+ .budgetId(0x0000000000004405L)
+ .reserved(0x00004206)
+ .payload(h2responseBuf, 0, h2responseBuf.capacity())
+ .build();
+ streams[0].write(DataFW.TYPE_ID, data4.buffer(), 0, data4.sizeof());
+
+ DirectBuffer hello2Buf = new String8FW("Hello World!").value();
+ DataFW data5 = dataRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000004L) // REP
+ .sequence(123)
+ .acknowledge(456)
+ .maximum(777)
+ .timestamp(0x000000000000000dL)
+ .traceId(0x0000000000000003L)
+ .budgetId(0x0000000000004405L)
+ .reserved(0x00004206)
+ .payload(hello2Buf, 0, hello2Buf.capacity())
+ .build();
+ streams[0].write(DataFW.TYPE_ID, data5.buffer(), 0, data5.sizeof());
+
+ FlushFW flush1 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000004L) // REP
+ .sequence(301)
+ .acknowledge(302)
+ .maximum(3344)
+ .timestamp(0x000000000000000eL)
+ .traceId(0x0000000000000003L)
+ .budgetId(0x0000000000003300L)
+ .reserved(0x00003303)
+ .build();
+ streams[0].write(FlushFW.TYPE_ID, flush1.buffer(), 0, flush1.sizeof());
+
+ AbortFW abort1 = abortRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000005L) // INI
+ .sequence(401)
+ .acknowledge(402)
+ .maximum(4477)
+ .timestamp(0x000000000000000fL)
+ .traceId(0x0000000000000003L)
+ .build();
+ streams[0].write(AbortFW.TYPE_ID, abort1.buffer(), 0, abort1.sizeof());
+
+ ResetFW reset1 = resetRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000006L) // REP
+ .sequence(501)
+ .acknowledge(502)
+ .maximum(5577)
+ .timestamp(0x0000000000000010L)
+ .traceId(0x0000000000000003L)
+ .build();
+ streams[0].write(ResetFW.TYPE_ID, reset1.buffer(), 0, reset1.sizeof());
+
+ EndFW end1 = endRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000005L) // INI
+ .sequence(701)
+ .acknowledge(702)
+ .maximum(7777)
+ .timestamp(0x0000000000000011L)
+ .traceId(0x0000000000000003L)
+ .build();
+ streams[0].write(EndFW.TYPE_ID, end1.buffer(), 0, end1.sizeof());
+
+ EndFW end2 = endRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000004L) // REP
+ .sequence(703)
+ .acknowledge(704)
+ .maximum(4444)
+ .timestamp(0x0000000000000012L)
+ .traceId(0x0000000000000003L)
+ .build();
+ streams[0].write(EndFW.TYPE_ID, end2.buffer(), 0, end2.sizeof());
+
+ // proxy extension
+ DirectBuffer proxyBeginEx1 = new UnsafeBuffer(ProxyFunctions.beginEx()
+ .typeId(PROXY_TYPE_ID)
+ .addressInet()
+ .protocol("stream")
+ .source("192.168.0.77")
+ .destination("192.168.0.42")
+ .sourcePort(12345)
+ .destinationPort(442)
+ .build()
+ .build());
+ BeginFW begin3 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x0000000900000011L) // south_kafka_client
+ .routedId(0x0000000900000012L) // south_tcp_client
+ .streamId(0x0000000000000009L) // INI
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+            .timestamp(0x0000000000000013L)
+ .traceId(0x0000000000000009L)
+ .affinity(0x0000000000000000L)
+ .extension(proxyBeginEx1, 0, proxyBeginEx1.capacity())
+ .build();
+ streams[0].write(BeginFW.TYPE_ID, begin3.buffer(), 0, begin3.sizeof());
+
+ DirectBuffer proxyBeginEx2 = new UnsafeBuffer(ProxyFunctions.beginEx()
+ .typeId(PROXY_TYPE_ID)
+ .addressInet4()
+ .protocol("stream")
+ .source("192.168.0.1")
+ .destination("192.168.0.254")
+ .sourcePort(32768)
+ .destinationPort(443)
+ .build()
+ .info()
+ .alpn("alpn")
+ .authority("authority")
+ .identity(BitUtil.fromHex("12345678"))
+ .namespace("namespace")
+ .secure()
+ .version("TLSv1.3")
+ .name("name")
+ .cipher("cipher")
+ .signature("signature")
+ .key("key")
+ .build()
+ .build()
+ .build());
+ BeginFW begin4 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x0000000900000011L) // south_kafka_client
+ .routedId(0x0000000900000012L) // south_tcp_client
+ .streamId(0x0000000000000009L) // INI
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+            .timestamp(0x0000000000000014L)
+ .traceId(0x0000000000000009L)
+ .affinity(0x0000000000000000L)
+ .extension(proxyBeginEx2, 0, proxyBeginEx2.capacity())
+ .build();
+ streams[0].write(BeginFW.TYPE_ID, begin4.buffer(), 0, begin4.sizeof());
+
+ DirectBuffer proxyBeginEx3 = new UnsafeBuffer(ProxyFunctions.beginEx()
+ .typeId(PROXY_TYPE_ID)
+ .addressInet6()
+ .protocol("stream")
+ .source("fd12:3456:789a:1::1")
+ .destination("fd12:3456:789a:1::fe")
+ .sourcePort(32768)
+ .destinationPort(443)
+ .build()
+ .build());
+ BeginFW begin5 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x0000000900000011L) // south_kafka_client
+ .routedId(0x0000000900000012L) // south_tcp_client
+ .streamId(0x0000000000000009L) // INI
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+            .timestamp(0x0000000000000015L)
+ .traceId(0x0000000000000009L)
+ .affinity(0x0000000000000000L)
+ .extension(proxyBeginEx3, 0, proxyBeginEx3.capacity())
+ .build();
+ streams[0].write(BeginFW.TYPE_ID, begin5.buffer(), 0, begin5.sizeof());
+
+ DirectBuffer proxyBeginEx4 = new UnsafeBuffer(ProxyFunctions.beginEx()
+ .typeId(PROXY_TYPE_ID)
+ .addressUnix()
+ .protocol("datagram")
+ .source("unix-source")
+ .destination("unix-destination")
+ .build()
+ .build());
+ BeginFW begin6 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x0000000900000011L) // south_kafka_client
+ .routedId(0x0000000900000012L) // south_tcp_client
+ .streamId(0x0000000000000009L) // INI
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+ .timestamp(0x0000000000000016L)
+ .traceId(0x0000000000000009L)
+ .affinity(0x0000000000000000L)
+ .extension(proxyBeginEx4, 0, proxyBeginEx4.capacity())
+ .build();
+ streams[0].write(BeginFW.TYPE_ID, begin6.buffer(), 0, begin6.sizeof());
+
+ DirectBuffer proxyBeginEx5 = new UnsafeBuffer(ProxyFunctions.beginEx()
+ .typeId(PROXY_TYPE_ID)
+ .addressNone()
+ .build()
+ .build());
+ BeginFW begin7 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x0000000900000011L) // south_kafka_client
+ .routedId(0x0000000900000012L) // south_tcp_client
+ .streamId(0x0000000000000009L) // INI
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+ .timestamp(0x0000000000000017L)
+ .traceId(0x0000000000000009L)
+ .affinity(0x0000000000000000L)
+ .extension(proxyBeginEx5, 0, proxyBeginEx5.capacity())
+ .build();
+ streams[0].write(BeginFW.TYPE_ID, begin7.buffer(), 0, begin7.sizeof());
+
+ // http extension
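+ // exercise each http extension kind: beginEx headers, challengeEx headers,
+ // flushEx promises, resetEx headers and endEx trailers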
+ DirectBuffer httpBeginEx1 = new UnsafeBuffer(HttpFunctions.beginEx()
+ .typeId(HTTP_TYPE_ID)
+ .header(":scheme", "http")
+ .header(":method", "GET")
+ .header(":path", "/hello")
+ .build());
+ BeginFW begin8 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000011L) // INI
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+ .timestamp(0x0000000000000018L)
+ .traceId(0x0000000000000011L)
+ .affinity(0x0000000000000000L)
+ .extension(httpBeginEx1, 0, httpBeginEx1.capacity())
+ .build();
+ streams[0].write(BeginFW.TYPE_ID, begin8.buffer(), 0, begin8.sizeof());
+
+ DirectBuffer httpChallengeEx1 = new UnsafeBuffer(HttpFunctions.challengeEx()
+ .typeId(HTTP_TYPE_ID)
+ .header(":scheme", "http")
+ .header(":method", "GET")
+ .header(":path", "/hello")
+ .build());
+ ChallengeFW challenge2 = challengeRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000011L) // INI
+ .sequence(201)
+ .acknowledge(202)
+ .maximum(22222)
+ .timestamp(0x0000000000000019L)
+ .traceId(0x0000000000000011L)
+ .authorization(0x0000000000007742L)
+ .extension(httpChallengeEx1, 0, httpChallengeEx1.capacity())
+ .build();
+ streams[0].write(ChallengeFW.TYPE_ID, challenge2.buffer(), 0, challenge2.sizeof());
+
+ DirectBuffer httpFlushEx1 = new UnsafeBuffer(HttpFunctions.flushEx()
+ .typeId(HTTP_TYPE_ID)
+ .promiseId(0x0000000000000042L)
+ .promise(":scheme", "http")
+ .promise(":method", "GET")
+ .promise(":path", "/hello")
+ .build());
+ FlushFW flush2 = flushRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000010L) // REP
+ .sequence(301)
+ .acknowledge(302)
+ .maximum(3344)
+ .timestamp(0x0000000000000020L)
+ .traceId(0x0000000000000011L)
+ .budgetId(0x0000000000000000L)
+ .reserved(0x00000000)
+ .extension(httpFlushEx1, 0, httpFlushEx1.capacity())
+ .build();
+ streams[0].write(FlushFW.TYPE_ID, flush2.buffer(), 0, flush2.sizeof());
+
+ DirectBuffer httpResetEx1 = new UnsafeBuffer(HttpFunctions.resetEx()
+ .typeId(HTTP_TYPE_ID)
+ .header(":scheme", "http")
+ .header(":method", "GET")
+ .header(":path", "/hello")
+ .build());
+ ResetFW reset2 = resetRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000010L) // REP
+ .sequence(501)
+ .acknowledge(502)
+ .maximum(5577)
+ .timestamp(0x0000000000000021L)
+ .traceId(0x0000000000000011L)
+ .extension(httpResetEx1, 0, httpResetEx1.capacity())
+ .build();
+ streams[0].write(ResetFW.TYPE_ID, reset2.buffer(), 0, reset2.sizeof());
+
+ DirectBuffer httpEndEx1 = new UnsafeBuffer(HttpFunctions.endEx()
+ .typeId(HTTP_TYPE_ID)
+ .trailer(":scheme", "http")
+ .trailer(":method", "GET")
+ .trailer(":path", "/hello")
+ .build());
+ EndFW end3 = endRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0000000000000011L) // INI
+ .sequence(742)
+ .acknowledge(427)
+ .maximum(60000)
+ .timestamp(0x0000000000000022L)
+ .traceId(0x0000000000000011L)
+ .extension(httpEndEx1, 0, httpEndEx1.capacity())
+ .build();
+ streams[0].write(EndFW.TYPE_ID, end3.buffer(), 0, end3.sizeof());
- DataFW data1 = new DataFW.Builder().wrap(frameBuffer, 0, frameBuffer.capacity())
+ // worker 1
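+ // frames below are written to streams[1] (the data1 file) and use a 0x01..
+ // prefix on stream and trace IDs, matching worker 1 in the expected dump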
+ SignalFW signal3 = signalRW.wrap(frameBuffer, 0, frameBuffer.capacity())
.originId(0)
.routedId(0)
.streamId(0)
.sequence(0)
.acknowledge(0)
.maximum(0)
- .budgetId(0)
- .reserved(0)
- .payload(new OctetsFW().wrap(new UnsafeBuffer(payloadBytes), 0, payloadBytes.length))
+ .timestamp(0x0000000000000001L)
+ .traceId(0x0100000000000001L)
+ .cancelId(0x0000000000008801L)
+ .signalId(0x00008802)
+ .contextId(0x00008803)
.build();
+ streams[1].write(SignalFW.TYPE_ID, signal3.buffer(), 0, signal3.sizeof());
- streams.write(DataFW.TYPE_ID, data1.buffer(), 0, data1.sizeof());
-
- DataFW data2 = new DataFW.Builder().wrap(frameBuffer, 0, frameBuffer.capacity())
- .originId(0)
- .routedId(1)
- .streamId(1)
- .sequence(1)
+ BeginFW begin9 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0101000000000005L) // INI
+ .sequence(0)
.acknowledge(0)
.maximum(0)
- .budgetId(0)
- .reserved(0)
- .payload(new OctetsFW().wrap(new UnsafeBuffer(payloadBytes), 0, payloadBytes.length))
+ .timestamp(0x0000000000000002L)
+ .traceId(0x0100000000000003L)
+ .affinity(0x0101000000000005L)
.build();
+ streams[1].write(BeginFW.TYPE_ID, begin9.buffer(), 0, begin9.sizeof());
+
+ EndFW end4 = endRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0101000000000004L) // REP
+ .sequence(703)
+ .acknowledge(704)
+ .maximum(4444)
+ .timestamp(0x0000000000000003L)
+ .traceId(0x0100000000000003L)
+ .build();
+ streams[1].write(EndFW.TYPE_ID, end4.buffer(), 0, end4.sizeof());
- streams.write(DataFW.TYPE_ID, data2.buffer(), 0, data2.sizeof());
-
-
- EndFW end1 = new EndFW.Builder().wrap(frameBuffer, 0, frameBuffer.capacity())
+ // worker 2
+ SignalFW signal4 = signalRW.wrap(frameBuffer, 0, frameBuffer.capacity())
.originId(0)
.routedId(0)
.streamId(0)
.sequence(0)
.acknowledge(0)
.maximum(0)
+ .timestamp(0x0000000000000001L)
+ .traceId(0x0200000000000001L)
+ .cancelId(0x0000000000008801L)
+ .signalId(0x00009902)
+ .contextId(0x00009903)
.build();
+ streams[2].write(SignalFW.TYPE_ID, signal4.buffer(), 0, signal4.sizeof());
- streams.write(EndFW.TYPE_ID, end1.buffer(), 0, end1.sizeof());
-
- EndFW end2 = new EndFW.Builder().wrap(frameBuffer, 0, frameBuffer.capacity())
- .originId(0)
- .routedId(1)
- .streamId(1)
- .sequence(1)
+ BeginFW begin10 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0202000000000005L) // INI
+ .sequence(0)
.acknowledge(0)
.maximum(0)
+ .timestamp(0x0000000000000002L)
+ .traceId(0x0200000000000003L)
+ .affinity(0x0202000000000005L)
.build();
-
- streams.write(EndFW.TYPE_ID, end2.buffer(), 0, end2.sizeof());
-
+ streams[2].write(BeginFW.TYPE_ID, begin10.buffer(), 0, begin10.sizeof());
+
+ EndFW end5 = endRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000000bL) // north_tcp_server
+ .routedId(0x000000090000000dL) // north_http_server
+ .streamId(0x0202000000000004L) // REP
+ .sequence(703)
+ .acknowledge(704)
+ .maximum(4444)
+ .timestamp(0x0000000000000003L)
+ .traceId(0x0200000000000003L)
+ .build();
+ streams[2].write(EndFW.TYPE_ID, end5.buffer(), 0, end5.sizeof());
+
+ // worker 0
+ // grpc extension
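+ // grpc coverage: beginEx metadata (plain, BASE64 and an over-long value),
+ // data frames with and without payload, and abortEx/resetEx status values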
+ DirectBuffer grpcBeginEx1 = new UnsafeBuffer(GrpcFunctions.beginEx()
+ .typeId(GRPC_TYPE_ID)
+ .scheme("http")
+ .authority("localhost:7153")
+ .service("example.EchoService")
+ .method("EchoUnary")
+ .metadata("grpc-accept-encoding", "gzip")
+ .metadata("metadata-2", "hello")
+ .metadata("BASE64", "metadata-3", "4242")
+ .build());
+ BeginFW begin11 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000001aL) // north_grpc_server
+ .routedId(0x000000090000001bL) // north_grpc_kafka_mapping
+ .streamId(0x0000000000000013L) // INI
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+ .timestamp(0x0000000000000023L)
+ .traceId(0x0000000000000013L)
+ .affinity(0x0000000000000000L)
+ .extension(grpcBeginEx1, 0, grpcBeginEx1.capacity())
+ .build();
+ streams[0].write(BeginFW.TYPE_ID, begin11.buffer(), 0, begin11.sizeof());
+
+ DirectBuffer grpcBeginEx2 = new UnsafeBuffer(GrpcFunctions.beginEx()
+ .typeId(GRPC_TYPE_ID)
+ .scheme("http")
+ .authority("localhost:7153")
+ .service("example.EchoService")
+ .method("EchoUnary")
+ .metadata("long field", "Z".repeat(200))
+ .metadata("metadata-2", "hello")
+ .build());
+ BeginFW begin12 = beginRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000001aL) // north_grpc_server
+ .routedId(0x000000090000001bL) // north_grpc_kafka_mapping
+ .streamId(0x0000000000000012L) // REP
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+ .timestamp(0x0000000000000024L)
+ .traceId(0x0000000000000013L)
+ .affinity(0x0000000000000000L)
+ .extension(grpcBeginEx2, 0, grpcBeginEx2.capacity())
+ .build();
+ streams[0].write(BeginFW.TYPE_ID, begin12.buffer(), 0, begin12.sizeof());
+
+ // data frame with extension, without payload, payload length is -1
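+ // (the builder is never given a .payload(...), so the frame records length -1)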
+ DirectBuffer grpcDataEx1 = new UnsafeBuffer(new byte[]{GRPC_TYPE_ID, 0, 0, 0, 42, 0, 0, 0});
+ DataFW data6 = dataRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000001aL) // north_grpc_server
+ .routedId(0x000000090000001bL) // north_grpc_kafka_mapping
+ .streamId(0x0000000000000013L) // INI
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+ .timestamp(0x0000000000000025L)
+ .traceId(0x0000000000000013L)
+ .budgetId(0x0000000000000013L)
+ .reserved(0x00000042)
+ .extension(grpcDataEx1, 0, grpcDataEx1.capacity())
+ .build();
+ streams[0].write(DataFW.TYPE_ID, data6.buffer(), 0, data6.sizeof());
+
+ // data frame with extension, without payload, payload length is 0
+ DirectBuffer grpcDataPayload1 = new UnsafeBuffer();
+ DirectBuffer grpcDataEx2 = new UnsafeBuffer(new byte[]{GRPC_TYPE_ID, 0, 0, 0, 77, 0, 0, 0});
+ DataFW data7 = dataRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000001aL) // north_grpc_server
+ .routedId(0x000000090000001bL) // north_grpc_kafka_mapping
+ .streamId(0x0000000000000012L) // REP
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+ .timestamp(0x0000000000000026L)
+ .traceId(0x0000000000000013L)
+ .budgetId(0x0000000000000013L)
+ .reserved(0x00000042)
+ .payload(grpcDataPayload1, 0, grpcDataPayload1.capacity())
+ .extension(grpcDataEx2, 0, grpcDataEx2.capacity())
+ .build();
+ streams[0].write(DataFW.TYPE_ID, data7.buffer(), 0, data7.sizeof());
+
+ // data frame with extension, with payload
+ DirectBuffer grpcDataPayload2 = new UnsafeBuffer("Hello World!".getBytes(StandardCharsets.UTF_8));
+ DirectBuffer grpcDataEx3 = new UnsafeBuffer(new byte[]{GRPC_TYPE_ID, 0, 0, 0, 88, 0, 0, 0});
+ DataFW data8 = dataRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000001aL) // north_grpc_server
+ .routedId(0x000000090000001bL) // north_grpc_kafka_mapping
+ .streamId(0x0000000000000013L) // INI
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+ .timestamp(0x0000000000000027L)
+ .traceId(0x0000000000000013L)
+ .budgetId(0x0000000000000013L)
+ .reserved(0x00000042)
+ .payload(grpcDataPayload2, 0, grpcDataPayload2.capacity())
+ .extension(grpcDataEx3, 0, grpcDataEx3.capacity())
+ .build();
+ streams[0].write(DataFW.TYPE_ID, data8.buffer(), 0, data8.sizeof());
+
+ DirectBuffer grpcAbortEx1 = new UnsafeBuffer(GrpcFunctions.abortEx()
+ .typeId(GRPC_TYPE_ID)
+ .status("aborted")
+ .build());
+ AbortFW abort2 = abortRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000001aL) // north_grpc_server
+ .routedId(0x000000090000001bL) // north_grpc_kafka_mapping
+ .streamId(0x0000000000000013L) // INI
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+ .timestamp(0x0000000000000028L)
+ .traceId(0x0000000000000013L)
+ .extension(grpcAbortEx1, 0, grpcAbortEx1.capacity())
+ .build();
+ streams[0].write(AbortFW.TYPE_ID, abort2.buffer(), 0, abort2.sizeof());
+
+ DirectBuffer grpcResetEx1 = new UnsafeBuffer(GrpcFunctions.abortEx()
+ .typeId(GRPC_TYPE_ID)
+ .status("reset")
+ .build());
+ ResetFW reset3 = resetRW.wrap(frameBuffer, 0, frameBuffer.capacity())
+ .originId(0x000000090000001aL) // north_grpc_server
+ .routedId(0x000000090000001bL) // north_grpc_kafka_mapping
+ .streamId(0x0000000000000012L) // REP
+ .sequence(0)
+ .acknowledge(0)
+ .maximum(0)
+ .timestamp(0x0000000000000029L)
+ .traceId(0x0000000000000013L)
+ .extension(grpcResetEx1, 0, grpcResetEx1.capacity())
+ .build();
+ streams[0].write(ResetFW.TYPE_ID, reset3.buffer(), 0, reset3.sizeof());
}
@BeforeEach
@@ -194,36 +828,54 @@ public void init()
command = new ZillaDumpCommand();
command.verbose = true;
command.continuous = false;
- command.properties = List.of(String.format("zilla.engine.directory=%s", Paths.get(baseDir, "engine")));
- command.output = Paths.get(tempDir.getPath(), "test.pcap");
+ command.properties = List.of(String.format("zilla.engine.directory=%s", ENGINE_PATH));
+ command.output = Paths.get(tempDir.getPath(), "actual.pcap");
}
@Test
- public void shouldDumpWithoutFilter() throws IOException
+ public void shouldWritePcap() throws IOException
{
+ // GIVEN
+ byte[] expected = getResourceAsBytes("expected_dump.pcap");
+
+ // WHEN
command.run();
+ // THEN
File[] files = tempDir.listFiles();
- assertEquals(1, files.length);
-
- File expectedDump = new File(baseDir + "/expected_dump_without_filter.pcap");
- byte[] expected = Files.readAllBytes(expectedDump.toPath());
+ assert files != null;
byte[] actual = Files.readAllBytes(files[0].toPath());
- assertArrayEquals(expected, actual);
+ assertThat(files.length, equalTo(1));
+ assertThat(actual, equalTo(expected));
}
@Test
- public void shouldDumpWithKafkaFilter() throws IOException
+ public void shouldWriteFilteredPcap() throws IOException
{
- command.bindings = singletonList("test.kafka0");
+ // GIVEN
+ byte[] expected = getResourceAsBytes("expected_filtered_dump.pcap");
+
+ // WHEN
+ command.bindings = singletonList("example.north_tls_server");
command.run();
+ // THEN
File[] files = tempDir.listFiles();
- assertEquals(1, files.length);
-
- File expectedDump = new File(baseDir + "/expected_dump_with_kafka_filter.pcap");
- byte[] expected = Files.readAllBytes(expectedDump.toPath());
+ assert files != null;
byte[] actual = Files.readAllBytes(files[0].toPath());
- assertArrayEquals(expected, actual);
+ assertThat(files.length, equalTo(1));
+ assertThat(actual, equalTo(expected));
+ }
+
+ private static byte[] getResourceAsBytes(
+ String resourceName) throws IOException
+ {
+ byte[] bytes;
+ try (InputStream is = ZillaDumpCommandTest.class.getResourceAsStream(resourceName))
+ {
+ assert is != null;
+ bytes = is.readAllBytes();
+ }
+ return bytes;
}
}
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/Dockerfile b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/Dockerfile
new file mode 100644
index 0000000000..3bcc88e4de
--- /dev/null
+++ b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/Dockerfile
@@ -0,0 +1,23 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+FROM ubuntu:24.04
+
+RUN apt-get update
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y tshark
+RUN useradd -ms /bin/bash tshark
+
+USER tshark
+WORKDIR /home/tshark
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/bindings b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/bindings
new file mode 100644
index 0000000000..9ef435b692
Binary files /dev/null and b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/bindings differ
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data0 b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data0
new file mode 100644
index 0000000000..7b01191159
Binary files /dev/null and b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data0 differ
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/engine/data0 b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data1
similarity index 87%
rename from incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/engine/data0
rename to incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data1
index 7ab9eb3c0d..d3baacb795 100644
Binary files a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/engine/data0 and b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data1 differ
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data2 b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data2
new file mode 100644
index 0000000000..e5b3ff595d
Binary files /dev/null and b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/data2 differ
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/labels b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/labels
new file mode 100644
index 0000000000..8b7de2bd28
--- /dev/null
+++ b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/engine/labels
@@ -0,0 +1,27 @@
+filesystem
+grpc
+http
+kafka
+proxy
+mqtt
+sse
+ws
+example
+my_servers
+north_tcp_server
+north_tls_server
+north_http_server
+north_http_kafka_mapping
+north_kafka_cache_client
+south_kafka_cache_server
+south_kafka_client
+south_tcp_client
+tcp
+server
+tls
+http-kafka
+cache_client
+cache_server
+client
+north_grpc_server
+north_grpc_kafka_mapping
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.pcap b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.pcap
new file mode 100644
index 0000000000..ef47fe0a9d
Binary files /dev/null and b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.pcap differ
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.txt b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.txt
new file mode 100644
index 0000000000..84ac74c530
--- /dev/null
+++ b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_dump.txt
@@ -0,0 +1,1765 @@
+Frame 1: 198 bytes on wire (1584 bits), 198 bytes captured (1584 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::1, Dst: fe80::
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 1, Len: 124
+Zilla Frame
+ Frame Type ID: 0x40000003
+ Frame Type: SIGNAL
+ Protocol Type ID: 0x00000000
+ Protocol Type:
+ Worker: 0
+ Offset: 0x00000000
+ Origin ID: 0x0000000000000000
+ Routed ID: 0x0000000000000000
+ Stream ID: 0x0000000000000000
+ Direction:
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000001
+ Trace ID: 0x0000000000000001
+ Authorization: 0x0000000000000000
+ Cancel ID: 0x0000000000007701
+ Signal ID: 30466
+ Context ID: 30467
+ Payload
+ Length: -1
+
+Frame 2: 198 bytes on wire (1584 bits), 198 bytes captured (1584 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80:0:0:1::1, Dst: fe80:0:0:1::
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 1, Len: 124
+Zilla Frame
+ Frame Type ID: 0x40000003
+ Frame Type: SIGNAL
+ Protocol Type ID: 0x00000000
+ Protocol Type:
+ Worker: 1
+ Offset: 0x00000000
+ Origin ID: 0x0000000000000000
+ Routed ID: 0x0000000000000000
+ Stream ID: 0x0000000000000000
+ Direction:
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000001
+ Trace ID: 0x0100000000000001
+ Authorization: 0x0000000000000000
+ Cancel ID: 0x0000000000008801
+ Signal ID: 34818
+ Context ID: 34819
+ Payload
+ Length: -1
+
+Frame 3: 198 bytes on wire (1584 bits), 198 bytes captured (1584 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80:0:0:2::1, Dst: fe80:0:0:2::
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 1, Len: 124
+Zilla Frame
+ Frame Type ID: 0x40000003
+ Frame Type: SIGNAL
+ Protocol Type ID: 0x00000000
+ Protocol Type:
+ Worker: 2
+ Offset: 0x00000000
+ Origin ID: 0x0000000000000000
+ Routed ID: 0x0000000000000000
+ Stream ID: 0x0000000000000000
+ Direction:
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000001
+ Trace ID: 0x0200000000000001
+ Authorization: 0x0000000000000000
+ Cancel ID: 0x0000000000008801
+ Signal ID: 39170
+ Context ID: 39171
+ Payload
+ Length: -1
+
+Frame 4: 210 bytes on wire (1680 bits), 210 bytes captured (1680 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::1, Dst: fe80::
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 125, Ack: 1, Len: 136
+Zilla Frame
+ Frame Type ID: 0x40000003
+ Frame Type: SIGNAL
+ Protocol Type ID: 0x00000000
+ Protocol Type:
+ Worker: 0
+ Offset: 0x00000060
+ Origin ID: 0x0000000000000000
+ Routed ID: 0x0000000000000000
+ Stream ID: 0x0000000000000000
+ Direction:
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000002
+ Trace ID: 0x0000000000000000
+ Authorization: 0x0000000000000000
+ Cancel ID: 0x0000000000007801
+ Signal ID: 30722
+ Context ID: 30723
+ Payload
+ Length: 12
+ Payload
+
+Frame 5: 233 bytes on wire (1864 bits), 233 bytes captured (1864 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::1:101:0:0:4, Dst: fe80::1:101:0:0:5
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 159
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 1
+ Offset: 0x00000060
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0101000000000005
+ Initial ID: 0x0101000000000005
+ Reply ID: 0x0101000000000004
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000002
+ Trace ID: 0x0100000000000003
+ Authorization: 0x0000000000000000
+ Affinity: 0x0101000000000005
+
+Frame 6: 233 bytes on wire (1864 bits), 233 bytes captured (1864 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::2:202:0:0:4, Dst: fe80::2:202:0:0:5
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 159
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 2
+ Offset: 0x00000060
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0202000000000005
+ Initial ID: 0x0202000000000005
+ Reply ID: 0x0202000000000004
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000002
+ Trace ID: 0x0200000000000003
+ Authorization: 0x0000000000000000
+ Affinity: 0x0202000000000005
+
+Frame 7: 233 bytes on wire (1864 bits), 233 bytes captured (1864 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::4, Dst: fe80::5
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 159
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x000000d0
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000005
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000003
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+ Affinity: 0x0000000000000005
+
+Frame 8: 225 bytes on wire (1800 bits), 225 bytes captured (1800 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::1:101:0:0:5, Dst: fe80::1:101:0:0:4
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 159, Len: 151
+Zilla Frame
+ Frame Type ID: 0x00000003
+ Frame Type: END
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 1
+ Offset: 0x000000b8
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0101000000000004
+ Initial ID: 0x0101000000000005
+ Reply ID: 0x0101000000000004
+ Direction: REP
+ Sequence: 703
+ Acknowledge: 704
+ Maximum: 4444
+ Timestamp: 0x0000000000000003
+ Trace ID: 0x0100000000000003
+ Authorization: 0x0000000000000000
+
+Frame 9: 225 bytes on wire (1800 bits), 225 bytes captured (1800 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::2:202:0:0:5, Dst: fe80::2:202:0:0:4
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 159, Len: 151
+Zilla Frame
+ Frame Type ID: 0x00000003
+ Frame Type: END
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 2
+ Offset: 0x000000b8
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0202000000000004
+ Initial ID: 0x0202000000000005
+ Reply ID: 0x0202000000000004
+ Direction: REP
+ Sequence: 703
+ Acknowledge: 704
+ Maximum: 4444
+ Timestamp: 0x0000000000000003
+ Trace ID: 0x0200000000000003
+ Authorization: 0x0000000000000000
+
+Frame 10: 242 bytes on wire (1936 bits), 242 bytes captured (1936 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::4, Dst: fe80::5
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 159, Ack: 1, Len: 168
+Zilla Frame
+ Frame Type ID: 0x40000002
+ Frame Type: WINDOW
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000128
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000005
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 65536
+ Timestamp: 0x0000000000000004
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+ Budget ID: 0x0000000000000000
+ Padding: 0
+ Minimum: 0
+ Capabilities: 0x00
+ Progress: 0
+ Progress/Maximum: 0/65536
+
+Frame 11: 233 bytes on wire (1864 bits), 233 bytes captured (1864 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::5, Dst: fe80::4
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 327, Len: 159
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000188
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000004
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: REP
+ Sequence: 1
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000005
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+ Affinity: 0x0000000000000000
+
+Frame 12: 242 bytes on wire (1936 bits), 242 bytes captured (1936 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::5, Dst: fe80::4
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 160, Ack: 327, Len: 168
+Zilla Frame
+ Frame Type ID: 0x40000002
+ Frame Type: WINDOW
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x000001e0
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000004
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: REP
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 65536
+ Timestamp: 0x0000000000000006
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+ Budget ID: 0x0000000000000000
+ Padding: 0
+ Minimum: 0
+ Capabilities: 0x00
+ Progress: 0
+ Progress/Maximum: 0/65536
+
+Frame 13: 232 bytes on wire (1856 bits), 232 bytes captured (1856 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::76, Dst: fe80::77
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 158
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x99f321bc
+ Protocol Type: tls
+ Worker: 0
+ Offset: 0x00000240
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000c
+ Routed Namespace: example
+ Routed Binding: north_tls_server
+ Stream ID: 0x0000000000000077
+ Initial ID: 0x0000000000000077
+ Reply ID: 0x0000000000000076
+ Direction: INI
+ Sequence: 71
+ Acknowledge: 72
+ Maximum: 73
+ Timestamp: 0x0000000000000007
+ Trace ID: 0x0000000000004202
+ Authorization: 0x0000000000004203
+ Affinity: 0x0000000000004204
+
+Frame 14: 372 bytes on wire (2976 bits), 372 bytes captured (2976 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::4, Dst: fe80::5
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 327, Ack: 328, Len: 298
+Zilla Frame
+ Frame Type ID: 0x00000002
+ Frame Type: DATA
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000298
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000005
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: INI
+ Sequence: 123
+ Acknowledge: 456
+ Maximum: 777
+ Timestamp: 0x0000000000000008
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+ Flags: 0x03
+ .... ...1 = FIN: Set (1)
+ .... ..1. = INIT: Set (1)
+ .... .0.. = INCOMPLETE: Not set (0)
+ .... 0... = SKIP: Not set (0)
+ Budget ID: 0x0000000000004205
+ Reserved: 16902
+ Progress: 16569
+ Progress/Maximum: 16569/777
+ Payload
+ Length: 130
+ Payload
+Hypertext Transfer Protocol
+ POST / HTTP/1.1\n
+ [Expert Info (Chat/Sequence): POST / HTTP/1.1\n]
+ [POST / HTTP/1.1\n]
+ [Severity level: Chat]
+ [Group: Sequence]
+ Request Method: POST
+ Request URI: /
+ Request Version: HTTP/1.1
+ Host: localhost:8080\n
+ User-Agent: curl/7.85.0\n
+ Accept: */*\n
+ Content-Type: text/plain\n
+ Content-Length: 12\n
+ [Content length: 12]
+ \n
+ [Full request URI: http://localhost:8080/]
+ [HTTP request 1/1]
+ File Data: 12 bytes
+Line-based text data: text/plain (1 lines)
+
+Frame 15: 316 bytes on wire (2528 bits), 316 bytes captured (2528 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::5, Dst: fe80::4
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 328, Ack: 625, Len: 242
+Zilla Frame
+ Frame Type ID: 0x00000002
+ Frame Type: DATA
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000378
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000004
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: REP
+ Sequence: 123
+ Acknowledge: 456
+ Maximum: 777
+ Timestamp: 0x0000000000000009
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+ Flags: 0x03
+ .... ...1 = FIN: Set (1)
+ .... ..1. = INIT: Set (1)
+ .... .0.. = INCOMPLETE: Not set (0)
+ .... 0... = SKIP: Not set (0)
+ Budget ID: 0x0000000000004205
+ Reserved: 16902
+ Progress: 16569
+ Progress/Maximum: 16569/777
+ Payload
+ Length: 74
+ Payload
+Hypertext Transfer Protocol
+ HTTP/1.1 200 OK\n
+ [Expert Info (Chat/Sequence): HTTP/1.1 200 OK\n]
+ [HTTP/1.1 200 OK\n]
+ [Severity level: Chat]
+ [Group: Sequence]
+ Response Version: HTTP/1.1
+ Status Code: 200
+ [Status Code Description: OK]
+ Response Phrase: OK
+ Content-Type: text/plain\n
+ Content-Length: 13\n
+ [Content length: 13]
+ \n
+ [HTTP response 1/1]
+ [Time since request: 0.000000000 seconds]
+ [Request in frame: 14]
+ [Request URI: http://localhost:8080/]
+ File Data: 13 bytes
+Line-based text data: text/plain (1 lines)
+
+Frame 16: 225 bytes on wire (1800 bits), 225 bytes captured (1800 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::5, Dst: fe80::4
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 570, Ack: 625, Len: 151
+Zilla Frame
+ Frame Type ID: 0x40000004
+ Frame Type: CHALLENGE
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000420
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000004
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: REP
+ Sequence: 201
+ Acknowledge: 202
+ Maximum: 22222
+ Timestamp: 0x000000000000000a
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000007742
+
+Frame 17: 295 bytes on wire (2360 bits), 295 bytes captured (2360 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::4, Dst: fe80::5
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 625, Ack: 721, Len: 221
+Zilla Frame
+ Frame Type ID: 0x00000002
+ Frame Type: DATA
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000470
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000005
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: INI
+ Sequence: 123
+ Acknowledge: 456
+ Maximum: 777
+ Timestamp: 0x000000000000000b
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+ Flags: 0x03
+ .... ...1 = FIN: Set (1)
+ .... ..1. = INIT: Set (1)
+ .... .0.. = INCOMPLETE: Not set (0)
+ .... 0... = SKIP: Not set (0)
+ Budget ID: 0x0000000000004405
+ Reserved: 16902
+ Progress: 16569
+ Progress/Maximum: 16569/777
+ Payload
+ Length: 53
+ Payload
+HyperText Transfer Protocol 2
+ Stream: HEADERS, Stream ID: 1, Length 44, POST /
+ Length: 44
+ Type: HEADERS (1)
+ Flags: 0x04, End Headers
+ 00.0 ..0. = Unused: 0x00
+ ..0. .... = Priority: False
+ .... 0... = Padded: False
+ .... .1.. = End Headers: True
+ .... ...0 = End Stream: False
+ 0... .... .... .... .... .... .... .... = Reserved: 0x0
+ .000 0000 0000 0000 0000 0000 0000 0001 = Stream Identifier: 1
+ [Pad Length: 0]
+ Header Block Fragment: 8387418aa0e41d139d09b8e85a67847a8825b650c3cb85717f53032a2f2a5f87497ca58ae819aa0f0d023132
+ [Header Length: 184]
+ [Header Count: 8]
+ Header: :method: POST
+ Name Length: 7
+ Name: :method
+ Value Length: 4
+ Value: POST
+ :method: POST
+ [Unescaped: POST]
+ Representation: Indexed Header Field
+ Index: 3
+ Header: :scheme: https
+ Name Length: 7
+ Name: :scheme
+ Value Length: 5
+ Value: https
+ :scheme: https
+ [Unescaped: https]
+ Representation: Indexed Header Field
+ Index: 7
+ Header: :authority: localhost:7143
+ Name Length: 10
+ Name: :authority
+ Value Length: 14
+ Value: localhost:7143
+ :authority: localhost:7143
+ [Unescaped: localhost:7143]
+ Representation: Literal Header Field with Incremental Indexing - Indexed Name
+ Index: 1
+ Header: :path: /
+ Name Length: 5
+ Name: :path
+ Value Length: 1
+ Value: /
+ :path: /
+ [Unescaped: /]
+ Representation: Indexed Header Field
+ Index: 4
+ Header: user-agent: curl/8.1.2
+ Name Length: 10
+ Name: user-agent
+ Value Length: 10
+ Value: curl/8.1.2
+ user-agent: curl/8.1.2
+ [Unescaped: curl/8.1.2]
+ Representation: Literal Header Field with Incremental Indexing - Indexed Name
+ Index: 58
+ Header: accept: */*
+ Name Length: 6
+ Name: accept
+ Value Length: 3
+ Value: */*
+ accept: */*
+ [Unescaped: */*]
+ Representation: Literal Header Field with Incremental Indexing - Indexed Name
+ Index: 19
+ Header: content-type: text/plain
+ Name Length: 12
+ Name: content-type
+ Value Length: 10
+ Value: text/plain
+ content-type: text/plain
+ [Unescaped: text/plain]
+ Representation: Literal Header Field with Incremental Indexing - Indexed Name
+ Index: 31
+ Header: content-length: 12
+ Name Length: 14
+ Name: content-length
+ Value Length: 2
+ Value: 12
+ content-length: 12
+ [Unescaped: 12]
+ Representation: Literal Header Field without Indexing - Indexed Name
+ Index: 28
+ [Full request URI: https://localhost:7143/]
+
+Frame 18: 289 bytes on wire (2312 bits), 289 bytes captured (2312 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::5, Dst: fe80::4
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 721, Ack: 846, Len: 215
+Zilla Frame
+ Frame Type ID: 0x00000002
+ Frame Type: DATA
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000508
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000004
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: REP
+ Sequence: 123
+ Acknowledge: 456
+ Maximum: 777
+ Timestamp: 0x000000000000000c
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+ Flags: 0x03
+ .... ...1 = FIN: Set (1)
+ .... ..1. = INIT: Set (1)
+ .... .0.. = INCOMPLETE: Not set (0)
+ .... 0... = SKIP: Not set (0)
+ Budget ID: 0x0000000000004405
+ Reserved: 16902
+ Progress: 16569
+ Progress/Maximum: 16569/777
+ Payload
+ Length: 47
+ Payload
+HyperText Transfer Protocol 2
+ Stream: HEADERS, Stream ID: 1, Length 38, 200 OK
+ Length: 38
+ Type: HEADERS (1)
+ Flags: 0x04, End Headers
+ 00.0 ..0. = Unused: 0x00
+ ..0. .... = Priority: False
+ .... 0... = Padded: False
+ .... .1.. = End Headers: True
+ .... ...0 = End Stream: False
+ 0... .... .... .... .... .... .... .... = Reserved: 0x0
+ .000 0000 0000 0000 0000 0000 0000 0001 = Stream Identifier: 1
+ [Pad Length: 0]
+ Header Block Fragment: 880f2b0a6375726c2f382e312e320f04032a2f2a0f100a746578742f706c61696e0f0d023132
+ [Header Length: 117]
+ [Header Count: 5]
+ Header: :status: 200 OK
+ Name Length: 7
+ Name: :status
+ Value Length: 3
+ Value: 200
+ :status: 200
+ [Unescaped: 200]
+ Representation: Indexed Header Field
+ Index: 8
+ Header: user-agent: curl/8.1.2
+ Name Length: 10
+ Name: user-agent
+ Value Length: 10
+ Value: curl/8.1.2
+ user-agent: curl/8.1.2
+ [Unescaped: curl/8.1.2]
+ Representation: Literal Header Field without Indexing - Indexed Name
+ Index: 58
+ Header: accept: */*
+ Name Length: 6
+ Name: accept
+ Value Length: 3
+ Value: */*
+ accept: */*
+ [Unescaped: */*]
+ Representation: Literal Header Field without Indexing - Indexed Name
+ Index: 19
+ Header: content-type: text/plain
+ Name Length: 12
+ Name: content-type
+ Value Length: 10
+ Value: text/plain
+ content-type: text/plain
+ [Unescaped: text/plain]
+ Representation: Literal Header Field without Indexing - Indexed Name
+ Index: 31
+ Header: content-length: 12
+ Name Length: 14
+ Name: content-length
+ Value Length: 2
+ Value: 12
+ content-length: 12
+ [Unescaped: 12]
+ Representation: Literal Header Field without Indexing - Indexed Name
+ Index: 28
+ [Time since request: 0.000000000 seconds]
+ [Request in frame: 17]
+
+Frame 19: 254 bytes on wire (2032 bits), 254 bytes captured (2032 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::5, Dst: fe80::4
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 936, Ack: 846, Len: 180
+Zilla Frame
+ Frame Type ID: 0x00000002
+ Frame Type: DATA
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000598
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000004
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: REP
+ Sequence: 123
+ Acknowledge: 456
+ Maximum: 777
+ Timestamp: 0x000000000000000d
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+ Flags: 0x03
+ .... ...1 = FIN: Set (1)
+ .... ..1. = INIT: Set (1)
+ .... .0.. = INCOMPLETE: Not set (0)
+ .... 0... = SKIP: Not set (0)
+ Budget ID: 0x0000000000004405
+ Reserved: 16902
+ Progress: 16569
+ Progress/Maximum: 16569/777
+ Payload
+ Length: 12
+ Payload
+
+Frame 20: 237 bytes on wire (1896 bits), 237 bytes captured (1896 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::5, Dst: fe80::4
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1116, Ack: 846, Len: 163
+Zilla Frame
+ Frame Type ID: 0x00000005
+ Frame Type: FLUSH
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000608
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000004
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: REP
+ Sequence: 301
+ Acknowledge: 302
+ Maximum: 3344
+ Timestamp: 0x000000000000000e
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+ Budget ID: 0x0000000000003300
+ Reserved: 13059
+
+Frame 21: 225 bytes on wire (1800 bits), 225 bytes captured (1800 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::4, Dst: fe80::5
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 846, Ack: 1279, Len: 151
+Zilla Frame
+ Frame Type ID: 0x00000004
+ Frame Type: ABORT
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000660
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000005
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: INI
+ Sequence: 401
+ Acknowledge: 402
+ Maximum: 4477
+ Timestamp: 0x000000000000000f
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+
+Frame 22: 225 bytes on wire (1800 bits), 225 bytes captured (1800 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::7, Dst: fe80::6
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 1, Len: 151
+Zilla Frame
+ Frame Type ID: 0x40000001
+ Frame Type: RESET
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x000006b0
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000006
+ Initial ID: 0x0000000000000007
+ Reply ID: 0x0000000000000006
+ Direction: REP
+ Sequence: 501
+ Acknowledge: 502
+ Maximum: 5577
+ Timestamp: 0x0000000000000010
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+
+Frame 23: 225 bytes on wire (1800 bits), 225 bytes captured (1800 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::4, Dst: fe80::5
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 997, Ack: 1279, Len: 151
+Zilla Frame
+ Frame Type ID: 0x00000003
+ Frame Type: END
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000700
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000005
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: INI
+ Sequence: 701
+ Acknowledge: 702
+ Maximum: 7777
+ Timestamp: 0x0000000000000011
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+
+Frame 24: 225 bytes on wire (1800 bits), 225 bytes captured (1800 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::5, Dst: fe80::4
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1279, Ack: 1148, Len: 151
+Zilla Frame
+ Frame Type ID: 0x00000003
+ Frame Type: END
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000750
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000004
+ Initial ID: 0x0000000000000005
+ Reply ID: 0x0000000000000004
+ Direction: REP
+ Sequence: 703
+ Acknowledge: 704
+ Maximum: 4444
+ Timestamp: 0x0000000000000012
+ Trace ID: 0x0000000000000003
+ Authorization: 0x0000000000000000
+
+Frame 25: 280 bytes on wire (2240 bits), 280 bytes captured (2240 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::8, Dst: fe80::9
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 206
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x084b20e1
+ Protocol Type: kafka
+ Worker: 0
+ Offset: 0x000007a0
+ Origin ID: 0x0000000900000011
+ Origin Namespace: example
+ Origin Binding: south_kafka_client
+ Routed ID: 0x0000000900000012
+ Routed Namespace: example
+ Routed Binding: south_tcp_client
+ Stream ID: 0x0000000000000009
+ Initial ID: 0x0000000000000009
+ Reply ID: 0x0000000000000008
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000013
+ Trace ID: 0x0000000000000009
+ Authorization: 0x0000000000000000
+ Affinity: 0x0000000000000000
+ Extension: proxy
+ Stream Type ID: 0x50a8ce8d
+ Stream Type: proxy
+ Address: INET
+ Family: INET (0)
+ Protocol: STREAM (0)
+ Source: 192.168.0.77
+ Source Port: 12345
+ Destination: 192.168.0.42
+ Destination Port: 442
+ Info (0 items)
+ Length: 4
+ Size: 0
+
+Frame 26: 342 bytes on wire (2736 bits), 342 bytes captured (2736 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::8, Dst: fe80::9
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 268
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x084b20e1
+ Protocol Type: kafka
+ Worker: 0
+ Offset: 0x00000828
+ Origin ID: 0x0000000900000011
+ Origin Namespace: example
+ Origin Binding: south_kafka_client
+ Routed ID: 0x0000000900000012
+ Routed Namespace: example
+ Routed Binding: south_tcp_client
+ Stream ID: 0x0000000000000009
+ Initial ID: 0x0000000000000009
+ Reply ID: 0x0000000000000008
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000014
+ Trace ID: 0x0000000000000009
+ Authorization: 0x0000000000000000
+ Affinity: 0x0000000000000000
+ Extension: proxy
+ Stream Type ID: 0x50a8ce8d
+ Stream Type: proxy
+ Address: INET4
+ Family: INET4 (1)
+ Protocol: STREAM (0)
+ Source: 192.168.0.1
+ Source Port: 32768
+ Destination: 192.168.0.254
+ Destination Port: 443
+ Info (9 items)
+ Length: 86
+ Size: 9
+ Info: ALPN: alpn
+ Type: ALPN (0x01)
+ Length: 4
+ Value: alpn
+ Info: AUTHORITY: authority
+ Type: AUTHORITY (0x02)
+ Length: 9
+ Value: authority
+ Info: IDENTITY: 0x12345678
+ Type: IDENTITY (0x05)
+ Length: 4
+ Value: 12345678
+ Info: NAMESPACE: namespace
+ Type: NAMESPACE (0x30)
+ Length: 9
+ Value: namespace
+ Info: SECURE: VERSION: TLSv1.3
+ Type: SECURE (0x20)
+ Secure Type: VERSION (0x21)
+ Length: 7
+ Value: TLSv1.3
+ Info: SECURE: NAME: name
+ Type: SECURE (0x20)
+ Secure Type: NAME (0x22)
+ Length: 4
+ Value: name
+ Info: SECURE: CIPHER: cipher
+ Type: SECURE (0x20)
+ Secure Type: CIPHER (0x23)
+ Length: 6
+ Value: cipher
+ Info: SECURE: SIGNATURE: signature
+ Type: SECURE (0x20)
+ Secure Type: SIGNATURE (0x24)
+ Length: 9
+ Value: signature
+ Info: SECURE: KEY: key
+ Type: SECURE (0x20)
+ Secure Type: KEY (0x25)
+ Length: 3
+ Value: key
+
+Frame 27: 284 bytes on wire (2272 bits), 284 bytes captured (2272 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::8, Dst: fe80::9
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 210
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x084b20e1
+ Protocol Type: kafka
+ Worker: 0
+ Offset: 0x000008e8
+ Origin ID: 0x0000000900000011
+ Origin Namespace: example
+ Origin Binding: south_kafka_client
+ Routed ID: 0x0000000900000012
+ Routed Namespace: example
+ Routed Binding: south_tcp_client
+ Stream ID: 0x0000000000000009
+ Initial ID: 0x0000000000000009
+ Reply ID: 0x0000000000000008
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000015
+ Trace ID: 0x0000000000000009
+ Authorization: 0x0000000000000000
+ Affinity: 0x0000000000000000
+ Extension: proxy
+ Stream Type ID: 0x50a8ce8d
+ Stream Type: proxy
+ Address: INET6
+ Family: INET6 (2)
+ Protocol: STREAM (0)
+ Source: fd12:3456:789a:1::1
+ Source Port: 32768
+ Destination: fd12:3456:789a:1::fe
+ Destination Port: 443
+ Info (0 items)
+ Length: 4
+ Size: 0
+
+Frame 28: 464 bytes on wire (3712 bits), 464 bytes captured (3712 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::8, Dst: fe80::9
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 390
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x084b20e1
+ Protocol Type: kafka
+ Worker: 0
+ Offset: 0x00000970
+ Origin ID: 0x0000000900000011
+ Origin Namespace: example
+ Origin Binding: south_kafka_client
+ Routed ID: 0x0000000900000012
+ Routed Namespace: example
+ Routed Binding: south_tcp_client
+ Stream ID: 0x0000000000000009
+ Initial ID: 0x0000000000000009
+ Reply ID: 0x0000000000000008
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000016
+ Trace ID: 0x0000000000000009
+ Authorization: 0x0000000000000000
+ Affinity: 0x0000000000000000
+ Extension: proxy
+ Stream Type ID: 0x50a8ce8d
+ Stream Type: proxy
+ Address: UNIX
+ Family: UNIX (3)
+ Protocol: DATAGRAM (1)
+ Source: unix-source
+ Destination: unix-destination
+ Info (0 items)
+ Length: 4
+ Size: 0
+
+Frame 29: 247 bytes on wire (1976 bits), 247 bytes captured (1976 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::8, Dst: fe80::9
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 173
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x084b20e1
+ Protocol Type: kafka
+ Worker: 0
+ Offset: 0x00000ab0
+ Origin ID: 0x0000000900000011
+ Origin Namespace: example
+ Origin Binding: south_kafka_client
+ Routed ID: 0x0000000900000012
+ Routed Namespace: example
+ Routed Binding: south_tcp_client
+ Stream ID: 0x0000000000000009
+ Initial ID: 0x0000000000000009
+ Reply ID: 0x0000000000000008
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000017
+ Trace ID: 0x0000000000000009
+ Authorization: 0x0000000000000000
+ Affinity: 0x0000000000000000
+ Extension: proxy
+ Stream Type ID: 0x50a8ce8d
+ Stream Type: proxy
+ Address: NONE
+ Family: NONE (4)
+ Info (0 items)
+ Length: 4
+ Size: 0
+
+Frame 30: 286 bytes on wire (2288 bits), 286 bytes captured (2288 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::10, Dst: fe80::11
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 212
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000b18
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000011
+ Initial ID: 0x0000000000000011
+ Reply ID: 0x0000000000000010
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000018
+ Trace ID: 0x0000000000000011
+ Authorization: 0x0000000000000000
+ Affinity: 0x0000000000000000
+ Extension: http
+ Stream Type ID: 0x4620b68a
+ Stream Type: http
+ Headers (3 items)
+ Length: 45
+ Size: 3
+ Header: :scheme: http
+ Length: 7
+ Name: :scheme
+ Length: 4
+ Value: http
+ Header: :method: GET
+ Length: 7
+ Name: :method
+ Length: 3
+ Value: GET
+ Header: :path: /hello
+ Length: 5
+ Name: :path
+ Length: 6
+ Value: /hello
+
+Frame 31: 278 bytes on wire (2224 bits), 278 bytes captured (2224 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::10, Dst: fe80::11
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 212, Ack: 1, Len: 204
+Zilla Frame
+ Frame Type ID: 0x40000004
+ Frame Type: CHALLENGE
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000ba8
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000011
+ Initial ID: 0x0000000000000011
+ Reply ID: 0x0000000000000010
+ Direction: INI
+ Sequence: 201
+ Acknowledge: 202
+ Maximum: 22222
+ Timestamp: 0x0000000000000019
+ Trace ID: 0x0000000000000011
+ Authorization: 0x0000000000007742
+ Extension: http
+ Stream Type ID: 0x4620b68a
+ Stream Type: http
+ Headers (3 items)
+ Length: 45
+ Size: 3
+ Header: :scheme: http
+ Length: 7
+ Name: :scheme
+ Length: 4
+ Value: http
+ Header: :method: GET
+ Length: 7
+ Name: :method
+ Length: 3
+ Value: GET
+ Header: :path: /hello
+ Length: 5
+ Name: :path
+ Length: 6
+ Value: /hello
+
+Frame 32: 298 bytes on wire (2384 bits), 298 bytes captured (2384 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::11, Dst: fe80::10
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 416, Len: 224
+Zilla Frame
+ Frame Type ID: 0x00000005
+ Frame Type: FLUSH
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000c30
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000010
+ Initial ID: 0x0000000000000011
+ Reply ID: 0x0000000000000010
+ Direction: REP
+ Sequence: 301
+ Acknowledge: 302
+ Maximum: 3344
+ Timestamp: 0x0000000000000020
+ Trace ID: 0x0000000000000011
+ Authorization: 0x0000000000000000
+ Budget ID: 0x0000000000000000
+ Reserved: 0
+ Extension: http
+ Stream Type ID: 0x4620b68a
+ Stream Type: http
+ Promise ID: 0x0000000000000042
+ Promises (3 items)
+ Length: 45
+ Size: 3
+ Promise: :scheme: http
+ Length: 7
+ Name: :scheme
+ Length: 4
+ Value: http
+ Promise: :method: GET
+ Length: 7
+ Name: :method
+ Length: 3
+ Value: GET
+ Promise: :path: /hello
+ Length: 5
+ Name: :path
+ Length: 6
+ Value: /hello
+
+Frame 33: 278 bytes on wire (2224 bits), 278 bytes captured (2224 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::11, Dst: fe80::10
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 225, Ack: 416, Len: 204
+Zilla Frame
+ Frame Type ID: 0x40000001
+ Frame Type: RESET
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000cc8
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000010
+ Initial ID: 0x0000000000000011
+ Reply ID: 0x0000000000000010
+ Direction: REP
+ Sequence: 501
+ Acknowledge: 502
+ Maximum: 5577
+ Timestamp: 0x0000000000000021
+ Trace ID: 0x0000000000000011
+ Authorization: 0x0000000000000000
+ Extension: http
+ Stream Type ID: 0x4620b68a
+ Stream Type: http
+ Headers (3 items)
+ Length: 45
+ Size: 3
+ Header: :scheme: http
+ Length: 7
+ Name: :scheme
+ Length: 4
+ Value: http
+ Header: :method: GET
+ Length: 7
+ Name: :method
+ Length: 3
+ Value: GET
+ Header: :path: /hello
+ Length: 5
+ Name: :path
+ Length: 6
+ Value: /hello
+
+Frame 34: 278 bytes on wire (2224 bits), 278 bytes captured (2224 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::10, Dst: fe80::11
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 416, Ack: 429, Len: 204
+Zilla Frame
+ Frame Type ID: 0x00000003
+ Frame Type: END
+ Protocol Type ID: 0x8ab62046
+ Protocol Type: http
+ Worker: 0
+ Offset: 0x00000d50
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000d
+ Routed Namespace: example
+ Routed Binding: north_http_server
+ Stream ID: 0x0000000000000011
+ Initial ID: 0x0000000000000011
+ Reply ID: 0x0000000000000010
+ Direction: INI
+ Sequence: 742
+ Acknowledge: 427
+ Maximum: 60000
+ Timestamp: 0x0000000000000022
+ Trace ID: 0x0000000000000011
+ Authorization: 0x0000000000000000
+ Extension: http
+ Stream Type ID: 0x4620b68a
+ Stream Type: http
+ Trailers (3 items)
+ Length: 45
+ Size: 3
+ Trailer: :scheme: http
+ Length: 7
+ Name: :scheme
+ Length: 4
+ Value: http
+ Trailer: :method: GET
+ Length: 7
+ Name: :method
+ Length: 3
+ Value: GET
+ Trailer: :path: /hello
+ Length: 5
+ Name: :path
+ Length: 6
+ Value: /hello
+
+Frame 35: 369 bytes on wire (2952 bits), 369 bytes captured (2952 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::12, Dst: fe80::13
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 295
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x00000000
+ Protocol Type:
+ Worker: 0
+ Offset: 0x00000dd8
+ Origin ID: 0x000000090000001a
+ Origin Namespace: example
+ Origin Binding: north_grpc_server
+ Routed ID: 0x000000090000001b
+ Routed Namespace: example
+ Routed Binding: north_grpc_kafka_mapping
+ Stream ID: 0x0000000000000013
+ Initial ID: 0x0000000000000013
+ Reply ID: 0x0000000000000012
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000023
+ Trace ID: 0x0000000000000013
+ Authorization: 0x0000000000000000
+ Affinity: 0x0000000000000000
+ Extension: grpc
+ Stream Type ID: 0x3a58c7f9
+ Stream Type: grpc
+ Scheme: http
+ Length: 4
+ Scheme: http
+ Authority: localhost:7153
+ Length: 14
+ Authority: localhost:7153
+ Service: example.EchoService
+ Length: 19
+ Service: example.EchoService
+ Method: EchoUnary
+ Length: 9
+ Method: EchoUnary
+ Metadata (3 items)
+ Length: 66
+ Size: 3
+ Metadata: [TEXT] grpc-accept-encoding: gzip
+ Type: TEXT (0)
+ Length (Varint): 28
+ Length: 20
+ Name: grpc-accept-encoding
+ Length (Varint): 08
+ Length: 4
+ Value: gzip
+ Metadata: [TEXT] metadata-2: hello
+ Type: TEXT (0)
+ Length (Varint): 14
+ Length: 10
+ Name: metadata-2
+ Length (Varint): 0a
+ Length: 5
+ Value: hello
+ Metadata: [BASE64] metadata-3: 4242
+ Type: BASE64 (1)
+ Length (Varint): 14
+ Length: 10
+ Name: metadata-3
+ Length (Varint): 08
+ Length: 4
+ Value: 4242
+
+Frame 36: 539 bytes on wire (4312 bits), 539 bytes captured (4312 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::13, Dst: fe80::12
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 1, Ack: 295, Len: 465
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x00000000
+ Protocol Type:
+ Worker: 0
+ Offset: 0x00000eb0
+ Origin ID: 0x000000090000001a
+ Origin Namespace: example
+ Origin Binding: north_grpc_server
+ Routed ID: 0x000000090000001b
+ Routed Namespace: example
+ Routed Binding: north_grpc_kafka_mapping
+ Stream ID: 0x0000000000000012
+ Initial ID: 0x0000000000000013
+ Reply ID: 0x0000000000000012
+ Direction: REP
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000024
+ Trace ID: 0x0000000000000013
+ Authorization: 0x0000000000000000
+ Affinity: 0x0000000000000000
+ Extension: grpc
+ Stream Type ID: 0x3a58c7f9
+ Stream Type: grpc
+ Scheme: http
+ Length: 4
+ Scheme: http
+ Authority: localhost:7153
+ Length: 14
+ Authority: localhost:7153
+ Service: example.EchoService
+ Length: 19
+ Service: example.EchoService
+ Method: EchoUnary
+ Length: 9
+ Method: EchoUnary
+ Metadata (2 items)
+ Length: 236
+ Size: 2
+ Metadata: [TEXT] long field: ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
+ Type: TEXT (0)
+ Length (Varint): 14
+ Length: 10
+ Name: long field
+ Length (Varint): 9003
+ Length: 200
+ Value: ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
+ Metadata: [TEXT] metadata-2: hello
+ Type: TEXT (0)
+ Length (Varint): 14
+ Length: 10
+ Name: metadata-2
+ Length (Varint): 0a
+ Length: 5
+ Value: hello
+
+Frame 37: 258 bytes on wire (2064 bits), 258 bytes captured (2064 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::12, Dst: fe80::13
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 295, Ack: 466, Len: 184
+Zilla Frame
+ Frame Type ID: 0x00000002
+ Frame Type: DATA
+ Protocol Type ID: 0x00000000
+ Protocol Type:
+ Worker: 0
+ Offset: 0x00001030
+ Origin ID: 0x000000090000001a
+ Origin Namespace: example
+ Origin Binding: north_grpc_server
+ Routed ID: 0x000000090000001b
+ Routed Namespace: example
+ Routed Binding: north_grpc_kafka_mapping
+ Stream ID: 0x0000000000000013
+ Initial ID: 0x0000000000000013
+ Reply ID: 0x0000000000000012
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000025
+ Trace ID: 0x0000000000000013
+ Authorization: 0x0000000000000000
+ Flags: 0x03
+ .... ...1 = FIN: Set (1)
+ .... ..1. = INIT: Set (1)
+ .... .0.. = INCOMPLETE: Not set (0)
+ .... 0... = SKIP: Not set (0)
+ Budget ID: 0x0000000000000013
+ Reserved: 66
+ Progress: 66
+ Progress/Maximum: 66/0
+ Payload
+ Length: -1
+ Extension: grpc
+ Stream Type ID: 0x3a58c7f9
+ Stream Type: grpc
+ Deferred: 42
+
+Frame 38: 258 bytes on wire (2064 bits), 258 bytes captured (2064 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::13, Dst: fe80::12
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 466, Ack: 479, Len: 184
+Zilla Frame
+ Frame Type ID: 0x00000002
+ Frame Type: DATA
+ Protocol Type ID: 0x00000000
+ Protocol Type:
+ Worker: 0
+ Offset: 0x00001098
+ Origin ID: 0x000000090000001a
+ Origin Namespace: example
+ Origin Binding: north_grpc_server
+ Routed ID: 0x000000090000001b
+ Routed Namespace: example
+ Routed Binding: north_grpc_kafka_mapping
+ Stream ID: 0x0000000000000012
+ Initial ID: 0x0000000000000013
+ Reply ID: 0x0000000000000012
+ Direction: REP
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000026
+ Trace ID: 0x0000000000000013
+ Authorization: 0x0000000000000000
+ Flags: 0x03
+ .... ...1 = FIN: Set (1)
+ .... ..1. = INIT: Set (1)
+ .... .0.. = INCOMPLETE: Not set (0)
+ .... 0... = SKIP: Not set (0)
+ Budget ID: 0x0000000000000013
+ Reserved: 66
+ Progress: 66
+ Progress/Maximum: 66/0
+ Payload
+ Length: 0
+ Extension: grpc
+ Stream Type ID: 0x3a58c7f9
+ Stream Type: grpc
+ Deferred: 77
+
+Frame 39: 270 bytes on wire (2160 bits), 270 bytes captured (2160 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::12, Dst: fe80::13
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 479, Ack: 650, Len: 196
+Zilla Frame
+ Frame Type ID: 0x00000002
+ Frame Type: DATA
+ Protocol Type ID: 0x00000000
+ Protocol Type:
+ Worker: 0
+ Offset: 0x00001100
+ Origin ID: 0x000000090000001a
+ Origin Namespace: example
+ Origin Binding: north_grpc_server
+ Routed ID: 0x000000090000001b
+ Routed Namespace: example
+ Routed Binding: north_grpc_kafka_mapping
+ Stream ID: 0x0000000000000013
+ Initial ID: 0x0000000000000013
+ Reply ID: 0x0000000000000012
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000027
+ Trace ID: 0x0000000000000013
+ Authorization: 0x0000000000000000
+ Flags: 0x03
+ .... ...1 = FIN: Set (1)
+ .... ..1. = INIT: Set (1)
+ .... .0.. = INCOMPLETE: Not set (0)
+ .... 0... = SKIP: Not set (0)
+ Budget ID: 0x0000000000000013
+ Reserved: 66
+ Progress: 66
+ Progress/Maximum: 66/0
+ Payload
+ Length: 12
+ Payload
+ Extension: grpc
+ Stream Type ID: 0x3a58c7f9
+ Stream Type: grpc
+ Deferred: 88
+
+Frame 40: 246 bytes on wire (1968 bits), 246 bytes captured (1968 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::12, Dst: fe80::13
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 675, Ack: 650, Len: 172
+Zilla Frame
+ Frame Type ID: 0x00000004
+ Frame Type: ABORT
+ Protocol Type ID: 0x00000000
+ Protocol Type:
+ Worker: 0
+ Offset: 0x00001178
+ Origin ID: 0x000000090000001a
+ Origin Namespace: example
+ Origin Binding: north_grpc_server
+ Routed ID: 0x000000090000001b
+ Routed Namespace: example
+ Routed Binding: north_grpc_kafka_mapping
+ Stream ID: 0x0000000000000013
+ Initial ID: 0x0000000000000013
+ Reply ID: 0x0000000000000012
+ Direction: INI
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000028
+ Trace ID: 0x0000000000000013
+ Authorization: 0x0000000000000000
+ Extension: grpc
+ Stream Type ID: 0x3a58c7f9
+ Stream Type: grpc
+ Status: aborted
+ Length: 7
+ Status: aborted
+
+Frame 41: 244 bytes on wire (1952 bits), 244 bytes captured (1952 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::13, Dst: fe80::12
+Transmission Control Protocol, Src Port: 7114, Dst Port: 0, Seq: 650, Ack: 847, Len: 170
+Zilla Frame
+ Frame Type ID: 0x40000001
+ Frame Type: RESET
+ Protocol Type ID: 0x00000000
+ Protocol Type:
+ Worker: 0
+ Offset: 0x000011d8
+ Origin ID: 0x000000090000001a
+ Origin Namespace: example
+ Origin Binding: north_grpc_server
+ Routed ID: 0x000000090000001b
+ Routed Namespace: example
+ Routed Binding: north_grpc_kafka_mapping
+ Stream ID: 0x0000000000000012
+ Initial ID: 0x0000000000000013
+ Reply ID: 0x0000000000000012
+ Direction: REP
+ Sequence: 0
+ Acknowledge: 0
+ Maximum: 0
+ Timestamp: 0x0000000000000029
+ Trace ID: 0x0000000000000013
+ Authorization: 0x0000000000000000
+ Extension: grpc
+ Stream Type ID: 0x3a58c7f9
+ Stream Type: grpc
+ Status: reset
+ Length: 5
+ Status: reset
+
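The gRPC metadata entries in the frames above print each `Length (Varint)` as the raw zigzag-encoded varint bytes in hex, with the decoded value on the following `Length` line (for example `9003` decodes to 200, `28` to 20, `14` to 10, `0a` to 5). A minimal sketch of that decoding, assuming protobuf-style zigzag varints; this is an illustrative helper, not the dump command's actual code:

```java
final class ZigzagVarint
{
    // Decode a little-endian base-128 varint, then undo zigzag encoding.
    // Bytes {0x90, 0x03}: raw = (0x90 & 0x7f) | (0x03 << 7) = 400, and
    // zigzag-decoding 400 yields 200 -- matching "Length (Varint): 9003".
    static int decode(byte[] bytes)
    {
        int raw = 0;
        int shift = 0;
        for (byte b : bytes)
        {
            raw |= (b & 0x7f) << shift;
            shift += 7;
        }
        return (raw >>> 1) ^ -(raw & 1); // zigzag -> signed value
    }
}
```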
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_filtered_dump.pcap b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_filtered_dump.pcap
new file mode 100644
index 0000000000..ef04621b3d
Binary files /dev/null and b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_filtered_dump.pcap differ
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_filtered_dump.txt b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_filtered_dump.txt
new file mode 100644
index 0000000000..e4c943083e
--- /dev/null
+++ b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/airline/expected_filtered_dump.txt
@@ -0,0 +1,29 @@
+Frame 1: 232 bytes on wire (1856 bits), 232 bytes captured (1856 bits)
+Ethernet II, Src: Send_00 (20:53:45:4e:44:00), Dst: Receive_00 (20:52:45:43:56:00)
+Internet Protocol Version 6, Src: fe80::76, Dst: fe80::77
+Transmission Control Protocol, Src Port: 0, Dst Port: 7114, Seq: 0, Ack: 1, Len: 158
+Zilla Frame
+ Frame Type ID: 0x00000001
+ Frame Type: BEGIN
+ Protocol Type ID: 0x99f321bc
+ Protocol Type: tls
+ Worker: 0
+ Offset: 0x00000240
+ Origin ID: 0x000000090000000b
+ Origin Namespace: example
+ Origin Binding: north_tcp_server
+ Routed ID: 0x000000090000000c
+ Routed Namespace: example
+ Routed Binding: north_tls_server
+ Stream ID: 0x0000000000000077
+ Initial ID: 0x0000000000000077
+ Reply ID: 0x0000000000000076
+ Direction: INI
+ Sequence: 71
+ Acknowledge: 72
+ Maximum: 73
+ Timestamp: 0x0000000000000007
+ Trace ID: 0x0000000000004202
+ Authorization: 0x0000000000004203
+ Affinity: 0x0000000000004204
+
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/engine/bindings b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/engine/bindings
deleted file mode 100644
index 68e81ae32e..0000000000
Binary files a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/engine/bindings and /dev/null differ
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/engine/labels b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/engine/labels
deleted file mode 100644
index 22335feb7a..0000000000
--- a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/engine/labels
+++ /dev/null
@@ -1,5 +0,0 @@
-test
-kafka0
-http
-kafka
-http0
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/expected_dump_with_kafka_filter.pcap b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/expected_dump_with_kafka_filter.pcap
deleted file mode 100644
index 3db2237555..0000000000
Binary files a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/expected_dump_with_kafka_filter.pcap and /dev/null differ
diff --git a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/expected_dump_without_filter.pcap b/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/expected_dump_without_filter.pcap
deleted file mode 100644
index 2224cd6a04..0000000000
Binary files a/incubator/command-dump/src/test/resources/io/aklivity/zilla/runtime/command/dump/internal/expected_dump_without_filter.pcap and /dev/null differ
diff --git a/incubator/command-generate/pom.xml b/incubator/command-generate/pom.xml
index 7ae396cd01..f7e8028b22 100644
--- a/incubator/command-generate/pom.xml
+++ b/incubator/command-generate/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/incubator/command-log/pom.xml b/incubator/command-log/pom.xml
index d884950916..226d537333 100644
--- a/incubator/command-log/pom.xml
+++ b/incubator/command-log/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/incubator/command-tune/pom.xml b/incubator/command-tune/pom.xml
index 0a96331aed..e94f2f1ca7 100644
--- a/incubator/command-tune/pom.xml
+++ b/incubator/command-tune/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/incubator/exporter-otlp.spec/pom.xml b/incubator/exporter-otlp.spec/pom.xml
index ec324f56d6..22b33c14d1 100644
--- a/incubator/exporter-otlp.spec/pom.xml
+++ b/incubator/exporter-otlp.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/incubator/exporter-otlp.spec/src/main/scripts/io/aklivity/zilla/specs/exporter/otlp/schema/otlp.schema.patch.json b/incubator/exporter-otlp.spec/src/main/scripts/io/aklivity/zilla/specs/exporter/otlp/schema/otlp.schema.patch.json
index 2a4f12b536..3c06e6b4d7 100644
--- a/incubator/exporter-otlp.spec/src/main/scripts/io/aklivity/zilla/specs/exporter/otlp/schema/otlp.schema.patch.json
+++ b/incubator/exporter-otlp.spec/src/main/scripts/io/aklivity/zilla/specs/exporter/otlp/schema/otlp.schema.patch.json
@@ -90,12 +90,12 @@
],
"additionalProperties": false
},
- "required":
- [
- "options"
- ],
"additionalProperties": false
- }
+ },
+ "required":
+ [
+ "options"
+ ]
}
}
}
diff --git a/incubator/exporter-otlp/pom.xml b/incubator/exporter-otlp/pom.xml
index 05ef8452d1..4009769d5f 100644
--- a/incubator/exporter-otlp/pom.xml
+++ b/incubator/exporter-otlp/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/incubator/pom.xml b/incubator/pom.xml
index 8a040de069..82bd774216 100644
--- a/incubator/pom.xml
+++ b/incubator/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>zilla</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/incubator/validator-avro.spec/pom.xml b/incubator/validator-avro.spec/pom.xml
index 03d0fa8a6b..e5b891fe39 100644
--- a/incubator/validator-avro.spec/pom.xml
+++ b/incubator/validator-avro.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/incubator/validator-avro/pom.xml b/incubator/validator-avro/pom.xml
index 33d7796fed..54c02d3ae0 100644
--- a/incubator/validator-avro/pom.xml
+++ b/incubator/validator-avro/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/incubator/validator-core.spec/pom.xml b/incubator/validator-core.spec/pom.xml
index c7a3cf7940..16eaaeea38 100644
--- a/incubator/validator-core.spec/pom.xml
+++ b/incubator/validator-core.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/incubator/validator-core/pom.xml b/incubator/validator-core/pom.xml
index 242626524b..09af093ec2 100644
--- a/incubator/validator-core/pom.xml
+++ b/incubator/validator-core/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/incubator/validator-json.spec/pom.xml b/incubator/validator-json.spec/pom.xml
index 7951f4ddec..4b288cf59c 100644
--- a/incubator/validator-json.spec/pom.xml
+++ b/incubator/validator-json.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/incubator/validator-json/pom.xml b/incubator/validator-json/pom.xml
index 61d71edf9b..b21c7f13ad 100644
--- a/incubator/validator-json/pom.xml
+++ b/incubator/validator-json/pom.xml
@@ -6,7 +6,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>incubator</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/manager/pom.xml b/manager/pom.xml
index 540e1a1ccd..5ea29e78d9 100644
--- a/manager/pom.xml
+++ b/manager/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>zilla</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/pom.xml b/pom.xml
index 6452c0b39c..b6a4b847b1 100644
--- a/pom.xml
+++ b/pom.xml
@@ -7,7 +7,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>io.aklivity.zilla</groupId>
<artifactId>zilla</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<packaging>pom</packaging>
<name>zilla</name>
<url>https://github.com/aklivity/zilla</url>
diff --git a/runtime/binding-echo/pom.xml b/runtime/binding-echo/pom.xml
index 2f159513cc..9b4341e7dc 100644
--- a/runtime/binding-echo/pom.xml
+++ b/runtime/binding-echo/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-fan/pom.xml b/runtime/binding-fan/pom.xml
index 075156f1b0..53c223c78a 100644
--- a/runtime/binding-fan/pom.xml
+++ b/runtime/binding-fan/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-filesystem/pom.xml b/runtime/binding-filesystem/pom.xml
index c685b88505..ebba393970 100644
--- a/runtime/binding-filesystem/pom.xml
+++ b/runtime/binding-filesystem/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-grpc-kafka/pom.xml b/runtime/binding-grpc-kafka/pom.xml
index ae0624edd5..920458aa56 100644
--- a/runtime/binding-grpc-kafka/pom.xml
+++ b/runtime/binding-grpc-kafka/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-grpc/pom.xml b/runtime/binding-grpc/pom.xml
index 72391c701b..3da727a410 100644
--- a/runtime/binding-grpc/pom.xml
+++ b/runtime/binding-grpc/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-http-filesystem/pom.xml b/runtime/binding-http-filesystem/pom.xml
index 0bb1efaeef..c6346460bb 100644
--- a/runtime/binding-http-filesystem/pom.xml
+++ b/runtime/binding-http-filesystem/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-http-kafka/pom.xml b/runtime/binding-http-kafka/pom.xml
index 85f943b12b..c2a85e612b 100644
--- a/runtime/binding-http-kafka/pom.xml
+++ b/runtime/binding-http-kafka/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-http/pom.xml b/runtime/binding-http/pom.xml
index 5b311ebd79..5ffa5f63d4 100644
--- a/runtime/binding-http/pom.xml
+++ b/runtime/binding-http/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-kafka-grpc/pom.xml b/runtime/binding-kafka-grpc/pom.xml
index 34cc4c8e00..452d1bbf47 100644
--- a/runtime/binding-kafka-grpc/pom.xml
+++ b/runtime/binding-kafka-grpc/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-kafka/pom.xml b/runtime/binding-kafka/pom.xml
index badaf9a17e..c5308f0c90 100644
--- a/runtime/binding-kafka/pom.xml
+++ b/runtime/binding-kafka/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java
index b82ad9a201..086205beef 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheClientConsumerFactory.java
@@ -453,6 +453,8 @@ final class KafkaCacheClientConsumerFan
private long replySeq;
private long replyAck;
private int replyMax;
+ private String host;
+ private int port;
private KafkaCacheClientConsumerFan(
@@ -720,6 +722,14 @@ private void onConsumerFanReplyBegin(
BeginFW begin)
{
final long traceId = begin.traceId();
+ final OctetsFW extension = begin.extension();
+
+ final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit());
+ final KafkaBeginExFW kafkaBeginEx = beginEx.typeId() == kafkaTypeId ? extension.get(kafkaBeginExRO::wrap) : null;
+ final KafkaConsumerBeginExFW kafkaConsumerBeginEx = kafkaBeginEx != null ? kafkaBeginEx.consumer() : null;
+
+ host = kafkaConsumerBeginEx.host().asString();
+ port = kafkaConsumerBeginEx.port();
state = KafkaState.openingReply(state);
@@ -1029,7 +1039,16 @@ private void doConsumerReplyBegin(
state = KafkaState.openingReply(state);
doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax,
- traceId, authorization, affinity, EMPTY_EXTENSION);
+ traceId, authorization, affinity, ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l)
+ .typeId(kafkaTypeId)
+ .consumer(c -> c
+ .groupId(fan.groupId)
+ .consumerId(fan.consumerId)
+ .host(fan.host)
+ .port(fan.port)
+ .timeout(fan.timeout)
+ .topic(fan.topic))
+ .build().sizeof()));
}
private void doConsumerReplyDataIfNecessary(
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetFetchFactory.java
index 8862c28106..9a4d17fa17 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetFetchFactory.java
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheOffsetFetchFactory.java
@@ -815,7 +815,6 @@ private void onOffsetFetchInitialBegin(
final long sequence = begin.sequence();
final long acknowledge = begin.acknowledge();
final long traceId = begin.traceId();
- final long authorization = begin.authorization();
final long affinity = begin.affinity();
final OctetsFW extension = begin.extension();
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java
index c9cf72831d..3fdb17dccc 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaCacheServerConsumerFactory.java
@@ -557,6 +557,8 @@ final class KafkaCacheServerConsumerFanout
private String leaderId;
private String memberId;
private String instanceId;
+ private String host;
+ private int port;
private int timeout;
private int generationId;
@@ -846,6 +848,8 @@ private void onConsumerReplyBegin(
final KafkaGroupBeginExFW kafkaGroupBeginEx = kafkaBeginEx != null ? kafkaBeginEx.group() : null;
instanceId = kafkaGroupBeginEx.instanceId().asString();
+ host = kafkaGroupBeginEx.host().asString();
+ port = kafkaGroupBeginEx.port();
state = KafkaState.openedReply(state);
@@ -1353,7 +1357,16 @@ private void doConsumerReplyBegin(
state = KafkaState.openingReply(state);
doBegin(sender, originId, routedId, replyId, replySeq, replyAck, replyMax,
- traceId, authorization, affinity, EMPTY_OCTETS);
+ traceId, authorization, affinity, ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l)
+ .typeId(kafkaTypeId)
+ .consumer(c -> c
+ .groupId(fanout.groupId)
+ .consumerId(fanout.consumerId)
+ .host(fanout.host)
+ .port(fanout.port)
+ .timeout(fanout.timeout)
+ .topic(topic))
+ .build().sizeof()));
}
private void doConsumerReplyData(
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConnectionPool.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConnectionPool.java
index 47440a09ce..89b5c15fbf 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConnectionPool.java
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientConnectionPool.java
@@ -78,7 +78,6 @@ public final class KafkaClientConnectionPool extends KafkaClientSaslHandshaker
private static final int SIGNAL_STREAM_WINDOW = 0x80000006;
private static final int SIGNAL_CONNECTION_CLEANUP = 0x80000007;
private static final int SIGNAL_NEXT_REQUEST = 0x80000008;
- private static final StringBuilder CLUSTER = new StringBuilder("");
private final BeginFW beginRO = new BeginFW();
private final DataFW dataRO = new DataFW();
@@ -173,7 +172,7 @@ private MessageConsumer newStream(
final ProxyBeginExFW proxyBeginEx = extension.get(proxyBeginExRO::tryWrap);
MessageConsumer newStream = null;
- CLUSTER.setLength(0);
+ final StringBuilder cluster = new StringBuilder();
if (proxyBeginEx != null)
{
@@ -181,21 +180,21 @@ private MessageConsumer newStream(
String host = inet.destination().asString();
int port = inet.destinationPort();
- CLUSTER.append(host);
- CLUSTER.append(":");
- CLUSTER.append(port);
+ cluster.append(host);
+ cluster.append(":");
+ cluster.append(port);
if (proxyBeginEx.infos() != null)
{
proxyBeginEx.infos().forEach(i ->
{
- CLUSTER.append(":");
- CLUSTER.append(i.authority().asString());
+ cluster.append(":");
+ cluster.append(i.authority().asString());
});
}
}
- final KafkaClientConnection connection = connectionPool.computeIfAbsent(CLUSTER.toString(), s ->
+ final KafkaClientConnection connection = connectionPool.computeIfAbsent(cluster.toString(), s ->
newConnection(originId, routedId, authorization));
newStream = connection.newStream(msgTypeId, buffer, index, length, sender);
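The hunk above replaces the shared static `CLUSTER` builder with a per-call local: a static mutable `StringBuilder` is only safe if every caller strictly serializes access, and a fresh builder also guarantees the pool key is rebuilt from scratch for each new stream. A minimal sketch of the hazard, with illustrative names only:

```java
final class PoolKeys
{
    // Shared mutable state: interleaved callers can reset or append into each
    // other's half-built key, producing a corrupted connection-pool lookup.
    static final StringBuilder SHARED = new StringBuilder();

    static String sharedKey(String host, int port)
    {
        SHARED.setLength(0); // another caller may reset here, mid-build
        return SHARED.append(host).append(':').append(port).toString();
    }

    // Per-call builder: no shared state, always a fresh "host:port" key.
    static String localKey(String host, int port)
    {
        return new StringBuilder().append(host).append(':').append(port).toString();
    }
}
```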
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java
index b796f407cf..0ef75d5307 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientGroupFactory.java
@@ -95,7 +95,6 @@
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.FlushFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW;
-import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFlushExFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupBeginExFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaGroupFlushExFW;
@@ -172,7 +171,6 @@ public final class KafkaClientGroupFactory extends KafkaClientSaslHandshaker imp
private final ResetFW.Builder resetRW = new ResetFW.Builder();
private final WindowFW.Builder windowRW = new WindowFW.Builder();
private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder();
- private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder();
private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder();
private final KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder();
private final ProxyBeginExFW.Builder proxyBeginExRW = new ProxyBeginExFW.Builder();
@@ -1405,7 +1403,7 @@ private void onStreamFlush(
final long sequence = flush.sequence();
final long acknowledge = flush.acknowledge();
final long traceId = flush.traceId();
- final long authorizationId = flush.authorization();
+ final long budgetId = flush.budgetId();
final int reserved = flush.reserved();
final OctetsFW extension = flush.extension();
@@ -1440,7 +1438,14 @@ private void onStreamFlush(
}
});
- coordinatorClient.doJoinGroupRequest(traceId);
+ if (host != null)
+ {
+ coordinatorClient.doJoinGroupRequest(traceId);
+ }
+ else
+ {
+ clusterClient.doEncodeRequestIfNecessary(traceId, budgetId);
+ }
}
else
{
@@ -1525,6 +1530,8 @@ private void doStreamBegin(
.groupId(groupId)
.protocol(protocol)
.instanceId(groupMembership.instanceId)
+ .host(host)
+ .port(port)
.timeout(timeout))
.build();
@@ -2709,6 +2716,7 @@ private void doNetworkBegin(
final KafkaClientRoute clientRoute = supplyClientRoute.apply(routedId);
final KafkaBrokerInfo broker = clientRoute.brokers.get(Long.parseLong(delegate.nodeId));
+
if (broker != null)
{
extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l)
@@ -4008,7 +4016,7 @@ private void doJoinGroupRequest(
encoders.add(encodeJoinGroupRequest);
signaler.signalNow(originId, routedId, initialId, traceId, SIGNAL_NEXT_REQUEST, 0);
}
- else
+ else if (delegate.host != null)
{
delegate.doStreamBeginIfNecessary(traceId, authorization);
}
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java
index 418147253e..fea3d3ec1f 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientOffsetFetchFactory.java
@@ -15,6 +15,7 @@
*/
package io.aklivity.zilla.runtime.binding.kafka.internal.stream;
+import static io.aklivity.zilla.runtime.binding.kafka.internal.types.ProxyAddressProtocol.STREAM;
import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT;
import static java.util.Objects.requireNonNull;
@@ -53,6 +54,7 @@
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaOffsetFetchBeginExFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ProxyBeginExFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.SignalFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW;
@@ -93,7 +95,7 @@ public final class KafkaClientOffsetFetchFactory extends KafkaClientSaslHandshak
private final AbortFW.Builder abortRW = new AbortFW.Builder();
private final ResetFW.Builder resetRW = new ResetFW.Builder();
private final WindowFW.Builder windowRW = new WindowFW.Builder();
- private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder();
+ private final ProxyBeginExFW.Builder proxyBeginExRW = new ProxyBeginExFW.Builder();
private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder();
private final KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder();
@@ -127,6 +129,7 @@ public final class KafkaClientOffsetFetchFactory extends KafkaClientSaslHandshak
private final KafkaOffsetFetchClientDecoder decodeReject = this::decodeReject;
private final int kafkaTypeId;
+ private final int proxyTypeId;
private final MutableDirectBuffer writeBuffer;
private final MutableDirectBuffer extBuffer;
private final BufferPool decodePool;
@@ -142,6 +145,7 @@ public KafkaClientOffsetFetchFactory(
{
super(config, context);
this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME);
+ this.proxyTypeId = context.supplyTypeId("proxy");
this.signaler = context.signaler();
this.streamFactory = context.streamFactory();
this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]);
@@ -173,6 +177,8 @@ public MessageConsumer newStream(
assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_OFFSET_FETCH;
final KafkaOffsetFetchBeginExFW kafkaOffsetFetchBeginEx = kafkaBeginEx.offsetFetch();
final String groupId = kafkaOffsetFetchBeginEx.groupId().asString();
+ final String host = kafkaOffsetFetchBeginEx.host().asString();
+ final int port = kafkaOffsetFetchBeginEx.port();
final String topic = kafkaOffsetFetchBeginEx.topic().asString();
IntHashSet partitions = new IntHashSet();
kafkaOffsetFetchBeginEx.partitions().forEach(p -> partitions.add(p.partitionId()));
@@ -196,6 +202,8 @@ public MessageConsumer newStream(
affinity,
resolvedId,
groupId,
+ host,
+ port,
topic,
partitions,
sasl)::onApplication;
@@ -757,6 +765,8 @@ private final class KafkaOffsetFetchStream
long affinity,
long resolvedId,
String groupId,
+ String host,
+ int port,
String topic,
IntHashSet partitions,
KafkaSaslConfig sasl)
@@ -767,7 +777,8 @@ private final class KafkaOffsetFetchStream
this.initialId = initialId;
this.replyId = supplyReplyId.applyAsLong(initialId);
this.affinity = affinity;
- this.client = new KafkaOffsetFetchClient(this, routedId, resolvedId, groupId, topic, partitions, sasl);
+ this.client = new KafkaOffsetFetchClient(this, routedId, resolvedId, groupId, host, port,
+ topic, partitions, sasl);
}
private void onApplication(
@@ -1020,6 +1031,8 @@ private final class KafkaOffsetFetchClient extends KafkaSaslClient
private final KafkaOffsetFetchStream delegate;
private final String groupId;
+ private final String host;
+ private final int port;
private final String topic;
private final IntHashSet partitions;
private final ObjectHashSet topicPartitions;
@@ -1060,6 +1073,8 @@ private final class KafkaOffsetFetchClient extends KafkaSaslClient
long originId,
long routedId,
String groupId,
+ String host,
+ int port,
String topic,
IntHashSet partitions,
KafkaSaslConfig sasl)
@@ -1067,6 +1082,8 @@ private final class KafkaOffsetFetchClient extends KafkaSaslClient
super(sasl, originId, routedId);
this.delegate = delegate;
this.groupId = requireNonNull(groupId);
+ this.host = host;
+ this.port = port;
this.topic = topic;
this.partitions = partitions;
this.topicPartitions = new ObjectHashSet<>();
@@ -1274,8 +1291,19 @@ private void doNetworkBegin(
{
state = KafkaState.openingInitial(state);
+ Consumer<OctetsFW.Builder> extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l)
+ .typeId(proxyTypeId)
+ .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM))
+ .source("0.0.0.0")
+ .destination(host)
+ .sourcePort(0)
+ .destinationPort(port)))
+ .infos(i -> i.item(ii -> ii.authority(host)))
+ .build()
+ .sizeof());
+
network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax,
- traceId, authorization, affinity, EMPTY_EXTENSION);
+ traceId, authorization, affinity, extension);
}
@Override
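The begin extension added above dials the offset-fetch network stream directly at the group coordinator's `host:port`, carrying the host as both the inet destination and an `authority` info item, instead of letting the tcp client connect to an arbitrary bootstrap broker. Consumer-group offsets are owned by a single coordinator broker per group, so a request routed elsewhere fails. A toy model of that routing rule, with illustrative names only (not Zilla APIs):

```java
import java.util.Map;

final class CoordinatorRouting
{
    record Broker(String host, int port) {}

    // Offsets for a group live on its coordinator, so the client must resolve
    // groupId -> coordinator (Kafka FindCoordinator) before issuing OffsetFetch.
    static Broker route(String groupId, Map<String, Broker> coordinatorsByGroup)
    {
        Broker coordinator = coordinatorsByGroup.get(groupId);
        if (coordinator == null)
        {
            throw new IllegalStateException("resolve the coordinator first");
        }
        return coordinator;
    }
}
```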
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java
index a813fa89d7..b2fb009d15 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java
@@ -1313,6 +1313,8 @@ private void onNetworkBegin(
stream.doApplicationBeginIfNecessary(traceId, authorization, topic, partitionId);
}
+ private long networkBytesReceived;
+
private void onNetworkData(
DataFW data)
{
@@ -1324,6 +1326,7 @@ private void onNetworkData(
assert acknowledge <= sequence;
assert sequence >= replySeq;
+ networkBytesReceived += Math.max(data.length(), 0);
authorization = data.authorization();
replySeq = sequence + data.reserved();
@@ -1385,7 +1388,8 @@ private void onNetworkAbort(
if (KafkaConfiguration.DEBUG)
{
- System.out.format("[client] %s[%s] PRODUCE aborted (%d bytes)\n", topic, partitionId);
+ System.out.format("[client] %s[%s] PRODUCE aborted (%d bytes)\n",
+ topic, partitionId, networkBytesReceived);
}
state = KafkaState.closedReply(state);
@@ -1400,7 +1404,8 @@ private void onNetworkReset(
if (KafkaConfiguration.DEBUG)
{
- System.out.format("[client] %s[%d] PRODUCE reset (%d bytes)\n", topic, partitionId);
+ System.out.format("[client] %s[%d] PRODUCE reset (%d bytes)\n",
+ topic, partitionId, networkBytesReceived);
}
state = KafkaState.closedInitial(state);
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java
index 80887cd342..651d33e4ac 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaMergedFactory.java
@@ -75,6 +75,7 @@
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.FlushFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerAssignmentFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerBeginExFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerDataExFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaConsumerFlushExFW;
import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDataExFW;
@@ -2776,6 +2777,8 @@ private final class KafkaUnmergedConsumerStream
private long replySeq;
private long replyAck;
private int replyMax;
+ private String host;
+ private int port;
private KafkaUnmergedConsumerStream(
KafkaMergedStream merged)
@@ -2926,9 +2929,17 @@ private void onConsumerReplyBegin(
BeginFW begin)
{
final long traceId = begin.traceId();
+ final OctetsFW extension = begin.extension();
state = KafkaState.openingReply(state);
+ final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit());
+ final KafkaBeginExFW kafkaBeginEx = beginEx.typeId() == kafkaTypeId ? extension.get(kafkaBeginExRO::wrap) : null;
+ final KafkaConsumerBeginExFW kafkaConsumerBeginEx = kafkaBeginEx != null ? kafkaBeginEx.consumer() : null;
+
+ host = kafkaConsumerBeginEx.host().asString();
+ port = kafkaConsumerBeginEx.port();
+
doConsumerReplyWindow(traceId, 0, 8192);
}
@@ -3148,6 +3159,8 @@ private void doOffsetFetchInitialBegin(
.typeId(kafkaTypeId)
.offsetFetch(c -> c
.groupId(merged.groupId)
+ .host(merged.consumerStream.host)
+ .port(merged.consumerStream.port)
.topic(merged.topic)
.partitions(p -> merged.leadersByAssignedId.forEach((k, v) ->
p.item(tp -> tp.partitionId(k))))
diff --git a/runtime/binding-mqtt-kafka/pom.xml b/runtime/binding-mqtt-kafka/pom.xml
index b6c50a58d9..6de7db6daa 100644
--- a/runtime/binding-mqtt-kafka/pom.xml
+++ b/runtime/binding-mqtt-kafka/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java
index 71be641871..4032e365a1 100644
--- a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java
+++ b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/config/MqttKafkaHeaderHelper.java
@@ -34,7 +34,7 @@ public class MqttKafkaHeaderHelper
private static final String KAFKA_LOCAL_HEADER_NAME = "zilla:local";
private static final String KAFKA_QOS_HEADER_NAME = "zilla:qos";
- private static final String KAFKA_TIMEOUT_HEADER_NAME = "zilla:timeout-ms";
+ private static final String KAFKA_TIMEOUT_HEADER_NAME = "zilla:expiry";
private static final String KAFKA_CONTENT_TYPE_HEADER_NAME = "zilla:content-type";
diff --git a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java
index f99f590726..40d74512f3 100644
--- a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java
+++ b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaPublishFactory.java
@@ -386,7 +386,7 @@ private void onMqttData(
if (mqttPublishDataEx.expiryInterval() != -1)
{
final MutableDirectBuffer expiryBuffer = new UnsafeBuffer(new byte[4]);
- expiryBuffer.putInt(0, mqttPublishDataEx.expiryInterval() * 1000, ByteOrder.BIG_ENDIAN);
+ expiryBuffer.putInt(0, mqttPublishDataEx.expiryInterval(), ByteOrder.BIG_ENDIAN);
kafkaHeadersRW.item(h ->
{
h.nameLen(helper.kafkaTimeoutHeaderName.sizeof());
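With the `* 1000` conversion removed, the renamed `zilla:expiry` header now carries the MQTT message expiry interval exactly as published. A sketch of how the consuming side could derive the remaining lifetime of a fetched message, assuming the header value is in seconds and the Kafka record timestamp is epoch milliseconds (an illustrative helper, not Zilla's code):

```java
import static java.util.concurrent.TimeUnit.SECONDS;

final class MessageExpiry
{
    // Remaining lifetime of a message published with an MQTT expiry interval;
    // a non-positive result means the message has expired and should be dropped.
    static long remainingMs(long timestampMs, int expirySeconds, long nowMs)
    {
        return timestampMs + SECONDS.toMillis(expirySeconds) - nowMs;
    }
}
```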
diff --git a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java
index 5f0613beaa..ebc439f253 100644
--- a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java
+++ b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionFactory.java
@@ -32,6 +32,7 @@
import org.agrona.DirectBuffer;
import org.agrona.MutableDirectBuffer;
+import org.agrona.collections.Int2IntHashMap;
import org.agrona.collections.Int2ObjectHashMap;
import org.agrona.collections.IntHashSet;
import org.agrona.collections.Long2ObjectHashMap;
@@ -133,6 +134,33 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory
private static final int MQTT_KAFKA_CAPABILITIES = RETAIN_AVAILABLE_MASK | WILDCARD_AVAILABLE_MASK |
SUBSCRIPTION_IDS_AVAILABLE_MASK;
public static final String GROUPID_SESSION_SUFFIX = "session";
+ public static final Int2IntHashMap MQTT_REASON_CODES;
+ public static final Int2ObjectHashMap<String16FW> MQTT_REASONS;
+ public static final int GROUP_AUTH_FAILED_ERROR_CODE = 30;
+ public static final int INVALID_DESCRIBE_CONFIG_ERROR_CODE = 35;
+ public static final int INVALID_SESSION_TIMEOUT_ERROR_CODE = 26;
+ public static final int MQTT_NOT_AUTHORIZED = 0x87;
+ public static final int MQTT_IMPLEMENTATION_SPECIFIC_ERROR = 0x83;
+ public static final String MQTT_INVALID_SESSION_TIMEOUT_REASON = "Invalid session expiry interval";
+ private static final String16FW EMPTY_STRING = new String16FW("");
+
+ static
+ {
+ final Int2IntHashMap reasonCodes = new Int2IntHashMap(MQTT_IMPLEMENTATION_SPECIFIC_ERROR);
+
+ reasonCodes.put(GROUP_AUTH_FAILED_ERROR_CODE, MQTT_NOT_AUTHORIZED);
+
+ MQTT_REASON_CODES = reasonCodes;
+ }
+
+ static
+ {
+ final Int2ObjectHashMap<String16FW> reasons = new Int2ObjectHashMap<>();
+
+ reasons.put(INVALID_SESSION_TIMEOUT_ERROR_CODE, new String16FW(MQTT_INVALID_SESSION_TIMEOUT_REASON));
+
+ MQTT_REASONS = reasons;
+ }
private final BeginFW beginRO = new BeginFW();
private final DataFW dataRO = new DataFW();
@@ -172,6 +200,7 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory
private final KafkaDataExFW.Builder kafkaDataExRW = new KafkaDataExFW.Builder();
private final KafkaFlushExFW.Builder kafkaFlushExRW = new KafkaFlushExFW.Builder();
private final MqttBeginExFW.Builder mqttSessionBeginExRW = new MqttBeginExFW.Builder();
+ private final MqttResetExFW.Builder mqttSessionResetExRW = new MqttResetExFW.Builder();
private final String16FW binaryFormat = new String16FW(MqttPayloadFormat.BINARY.name());
private final String16FW textFormat = new String16FW(MqttPayloadFormat.TEXT.name());
@@ -205,6 +234,7 @@ public class MqttKafkaSessionFactory implements MqttKafkaStreamFactory
private final InstanceId instanceId;
private final boolean willAvailable;
private final int reconnectDelay;
+ private final Int2ObjectHashMap<String16FW> qosLevels;
private String serverRef;
private int reconnectAttempt;
@@ -246,6 +276,10 @@ public MqttKafkaSessionFactory(
this.sessionExpiryIds = new Object2LongHashMap<>(-1);
this.instanceId = instanceId;
this.reconnectDelay = config.willStreamReconnectDelay();
+ this.qosLevels = new Int2ObjectHashMap<>();
+ this.qosLevels.put(0, new String16FW("0"));
+ this.qosLevels.put(1, new String16FW("1"));
+ this.qosLevels.put(2, new String16FW("2"));
}
@Override
@@ -506,8 +540,7 @@ private void onMqttData(
MqttWillMessageFW will = mqttWillRO.tryWrap(buffer, offset, limit);
this.delay = (int) Math.min(SECONDS.toMillis(will.delay()), sessionExpiryMillis);
- final int expiryInterval = will.expiryInterval() == -1 ? -1 :
- (int) TimeUnit.SECONDS.toMillis(will.expiryInterval());
+ final int expiryInterval = will.expiryInterval() == -1 ? -1 : will.expiryInterval();
final MqttWillMessageFW.Builder willMessageBuilder =
mqttMessageRW.wrap(willMessageBuffer, 0, willMessageBuffer.capacity())
.topic(will.topic())
@@ -2069,6 +2102,8 @@ private void sendWill(
will.properties().forEach(property ->
addHeader(property.key(), property.value()));
+ addHeader(helper.kafkaQosHeaderName, qosLevels.get(will.qos()));
+
kafkaDataEx = kafkaDataExRW
.wrap(extBuffer, 0, extBuffer.capacity())
.typeId(kafkaTypeId)
@@ -3300,10 +3335,25 @@ private void onKafkaReset(
final long sequence = reset.sequence();
final long acknowledge = reset.acknowledge();
final long traceId = reset.traceId();
+ final OctetsFW extension = reset.extension();
assert acknowledge <= sequence;
- delegate.doMqttReset(traceId, EMPTY_OCTETS);
+
+ final KafkaResetExFW kafkaResetEx = extension.get(kafkaResetExRO::tryWrap);
+ final int error = kafkaResetEx != null ? kafkaResetEx.error() : -1;
+
+ Flyweight mqttResetEx = EMPTY_OCTETS;
+ if (error != -1)
+ {
+ mqttResetEx =
+ mqttSessionResetExRW.wrap(sessionExtBuffer, 0, sessionExtBuffer.capacity())
+ .typeId(mqttTypeId)
+ .reasonCode(MQTT_REASON_CODES.get(error))
+ .reason(MQTT_REASONS.get(error))
+ .build();
+ }
+ delegate.doMqttReset(traceId, mqttResetEx);
}
private void doKafkaReset(
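The reset handler above maps Kafka error codes onto MQTT reason codes through `MQTT_REASON_CODES`, relying on `Int2IntHashMap`'s missing-value constructor so that any unmapped error falls back to the implementation-specific MQTT code (0x83). A small demonstration of that fallback behavior, using agrona's `Int2IntHashMap` as in the hunk above:

```java
import org.agrona.collections.Int2IntHashMap;

final class ReasonCodeFallback
{
    public static void main(String[] args)
    {
        Int2IntHashMap codes = new Int2IntHashMap(0x83); // absent key -> 0x83
        codes.put(30, 0x87);                             // group auth failed -> not authorized

        assert codes.get(30) == 0x87;
        assert codes.get(99) == 0x83;                    // unmapped -> implementation-specific
        System.out.printf("mapped=0x%02x fallback=0x%02x%n", codes.get(30), codes.get(99));
    }
}
```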
diff --git a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java
index 5a0374be62..3ad06d127d 100644
--- a/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java
+++ b/runtime/binding-mqtt-kafka/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeFactory.java
@@ -23,6 +23,7 @@
import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT;
import static io.aklivity.zilla.runtime.engine.concurrent.Signaler.NO_CANCEL_ID;
import static java.lang.System.currentTimeMillis;
+import static java.time.Instant.now;
import static java.util.concurrent.TimeUnit.SECONDS;
@@ -104,7 +105,10 @@ public class MqttKafkaSubscribeFactory implements MqttKafkaStreamFactory
private static final int RETAIN_FLAG = 1 << RETAIN.ordinal();
private static final int RETAIN_AS_PUBLISHED_FLAG = 1 << RETAIN_AS_PUBLISHED.ordinal();
private static final int SIGNAL_CONNECT_BOOTSTRAP_STREAM = 1;
- private static final int DATA_FIN_FLAG = 0x03;
+ private static final int DATA_FLAG_INIT = 0x02;
+ private static final int DATA_FLAG_FIN = 0x01;
+ private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(new UnsafeBuffer(new byte[0]), 0, 0);
+
private final OctetsFW emptyRO = new OctetsFW().wrap(new UnsafeBuffer(0L, 0), 0, 0);
private final BeginFW beginRO = new BeginFW();
private final DataFW dataRO = new DataFW();
@@ -1096,6 +1100,7 @@ final class KafkaMessagesProxy extends KafkaProxy
private long replyAck;
private int replyMax;
private int replyPad;
+ private boolean expiredMessage;
private KafkaMessagesProxy(
long originId,
@@ -1420,6 +1425,7 @@ private void onKafkaData(
assert replyAck <= replySeq;
+ sendData:
if (replySeq > replyAck + replyMax)
{
doKafkaReset(traceId);
@@ -1439,13 +1445,31 @@ private void onKafkaData(
final OctetsFW key = kafkaMergedDataEx != null ? kafkaMergedDataEx.fetch().key().value() : null;
final long filters = kafkaMergedDataEx != null ? kafkaMergedDataEx.fetch().filters() : 0;
final KafkaOffsetFW partition = kafkaMergedDataEx != null ? kafkaMergedDataEx.fetch().partition() : null;
+ final long timestamp = kafkaMergedDataEx != null ? kafkaMergedDataEx.fetch().timestamp() : 0;
+
- if (key != null)
+ Flyweight mqttSubscribeDataEx = EMPTY_OCTETS;
+ if ((flags & DATA_FLAG_INIT) != 0x00 && key != null)
{
String topicName = kafkaMergedDataEx.fetch().key().value()
.get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o));
helper.visit(kafkaMergedDataEx);
+ long expireInterval;
+ if (helper.timeout != -1)
+ {
+ expireInterval = timestamp + helper.timeout - now().toEpochMilli();
+ if (expireInterval < 0)
+ {
+ expiredMessage = true;
+ break sendData;
+ }
+ }
+ else
+ {
+ expireInterval = helper.timeout;
+ }
+
// If the qos it was created for is 0, set the high watermark, as we won't receive ack
if (mqtt.qos == MqttQoS.AT_MOST_ONCE.value())
{
@@ -1457,7 +1481,7 @@ private void onKafkaData(
}
}
- final MqttDataExFW mqttSubscribeDataEx = mqttDataExRW.wrap(extBuffer, 0, extBuffer.capacity())
+ mqttSubscribeDataEx = mqttDataExRW.wrap(extBuffer, 0, extBuffer.capacity())
.typeId(mqttTypeId)
.subscribe(b ->
{
@@ -1487,9 +1511,10 @@ private void onKafkaData(
}
b.flags(flag);
b.subscriptionIds(subscriptionIdsRW.build());
- if (helper.timeout != -1)
+
+ if (expireInterval != -1)
{
- b.expiryInterval(helper.timeout / 1000);
+ b.expiryInterval((int) expireInterval);
}
if (helper.contentType != null)
{
@@ -1525,7 +1550,10 @@ private void onKafkaData(
}
});
}).build();
+ }
+ if (!expiredMessage)
+ {
if (!MqttKafkaState.initialOpened(mqtt.retained.state) ||
MqttKafkaState.replyClosed(mqtt.retained.state))
{
@@ -1555,6 +1583,11 @@ private void onKafkaData(
messageSlotReserved += reserved;
}
}
+
+ if ((flags & DATA_FLAG_FIN) != 0x00)
+ {
+ expiredMessage = false;
+ }
}
}
@@ -1573,7 +1606,7 @@ private void flushData(
{
final MqttSubscribeMessageFW message = mqttSubscribeMessageRO.wrap(dataBuffer, messageSlotOffset,
dataBuffer.capacity());
- mqtt.doMqttData(traceId, authorization, budgetId, reserved, DATA_FIN_FLAG, message.payload(),
+ mqtt.doMqttData(traceId, authorization, budgetId, reserved, DATA_FLAG_FIN, message.payload(),
message.extension());
messageSlotOffset += message.sizeof();
@@ -1834,6 +1867,7 @@ final class KafkaRetainedProxy extends KafkaProxy
private int replyPad;
private int unAckedPackets;
+ private boolean expiredMessage;
private KafkaRetainedProxy(
long originId,
@@ -1907,7 +1941,6 @@ protected void doKafkaConsumerFlush(
final MqttOffsetStateFlags state = offsetCommit.state;
final int packetId = offsetCommit.packetId;
- boolean shouldClose = false;
if (qos == MqttQoS.EXACTLY_ONCE.value() && state == MqttOffsetStateFlags.COMPLETE)
{
final IntArrayList incompletes = incompletePacketIds.get(offset.partitionId);
@@ -1923,11 +1956,6 @@ protected void doKafkaConsumerFlush(
incompletePacketIds.computeIfAbsent(offset.partitionId, c -> new IntArrayList()).add(packetId);
}
- if (unAckedPackets == 0 && incompletePacketIds.isEmpty())
- {
- shouldClose = true;
- }
-
final int correlationId = state == MqttOffsetStateFlags.INCOMPLETE ? packetId : -1;
final KafkaFlushExFW kafkaFlushEx =
@@ -1949,12 +1977,6 @@ protected void doKafkaConsumerFlush(
doFlush(kafka, originId, routedId, initialId, initialSeq, initialAck, initialMax,
traceId, authorization, budgetId, reserved, kafkaFlushEx);
-
- if (shouldClose)
- {
- mqtt.retainedSubscriptionIds.clear();
- doKafkaEnd(traceId, authorization);
- }
}
private void doKafkaFlush(
@@ -2138,6 +2160,7 @@ private void onKafkaData(
assert replyAck <= replySeq;
+ sendData:
if (replySeq > replyAck + replyMax)
{
doKafkaReset(traceId);
@@ -2157,13 +2180,31 @@ private void onKafkaData(
final OctetsFW key = kafkaMergedDataEx != null ? kafkaMergedDataEx.fetch().key().value() : null;
final long filters = kafkaMergedDataEx != null ? kafkaMergedDataEx.fetch().filters() : 0;
final KafkaOffsetFW partition = kafkaMergedDataEx != null ? kafkaMergedDataEx.fetch().partition() : null;
+ final long timestamp = kafkaMergedDataEx != null ? kafkaMergedDataEx.fetch().timestamp() : 0;
- if (key != null)
+ Flyweight mqttSubscribeDataEx = EMPTY_OCTETS;
+ if ((flags & DATA_FLAG_INIT) != 0x00 && key != null)
{
String topicName = kafkaMergedDataEx.fetch().key().value()
.get((b, o, m) -> b.getStringWithoutLengthUtf8(o, m - o));
helper.visit(kafkaMergedDataEx);
- final Flyweight mqttSubscribeDataEx = mqttDataExRW.wrap(extBuffer, 0, extBuffer.capacity())
+
+ long expireInterval;
+ if (helper.timeout != -1)
+ {
+ expireInterval = timestamp + helper.timeout - now().toEpochMilli();
+ if (expireInterval < 0)
+ {
+ expiredMessage = true;
+ break sendData;
+ }
+ }
+ else
+ {
+ expireInterval = helper.timeout;
+ }
+
+ mqttSubscribeDataEx = mqttDataExRW.wrap(extBuffer, 0, extBuffer.capacity())
.typeId(mqttTypeId)
.subscribe(b ->
{
@@ -2199,9 +2240,9 @@ private void onKafkaData(
}
b.flags(flag);
b.subscriptionIds(subscriptionIdsRW.build());
- if (helper.timeout != -1)
+ if (expireInterval != -1)
{
- b.expiryInterval(helper.timeout / 1000);
+ b.expiryInterval((int) expireInterval);
}
if (helper.contentType != null)
{
@@ -2237,11 +2278,18 @@ private void onKafkaData(
}
});
}).build();
+ }
+ if (!expiredMessage)
+ {
mqtt.doMqttData(traceId, authorization, budgetId, reserved, flags, payload, mqttSubscribeDataEx);
-
mqtt.mqttSharedBudget -= length;
}
+
+ if ((flags & DATA_FLAG_FIN) != 0x00)
+ {
+ expiredMessage = false;
+ }
}
}
@@ -2299,7 +2347,10 @@ private void onKafkaFlush(
.subscribe(b -> b.packetId((int) correlationId)).build();
mqtt.doMqttFlush(traceId, authorization, budgetId, reserved, mqttSubscribeFlushEx);
}
- unAckedPackets--;
+ else
+ {
+ unAckedPackets--;
+ }
}
else
{
diff --git a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java
index 272167bab1..6ca3220c13 100644
--- a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java
+++ b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSessionProxyIT.java
@@ -26,6 +26,7 @@
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.junit.rules.RuleChain.outerRule;
+import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.DisableOnDebug;
@@ -200,6 +201,41 @@ public void shouldGroupStreamReceiveServerSentReset() throws Exception
k3po.finish();
}
+ @Ignore("k3po cannot send an extension with a rejection")
+ @Test
+ @Configuration("proxy.yaml")
+ @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+ @Specification({
+ "${mqtt}/session.group.reset.not.authorized/client",
+ "${kafka}/session.group.reset.not.authorized/server"})
+ public void shouldGroupStreamReceiveResetNotAuthorized() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Ignore("k3po cannot send an extension with a rejection")
+ @Test
+ @Configuration("proxy.yaml")
+ @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+ @Specification({
+ "${mqtt}/session.group.reset.invalid.session.timeout/client",
+ "${kafka}/session.group.reset.invalid.session.timeout/server"})
+ public void shouldGroupStreamReceiveResetInvalidSessionTimeout() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Ignore("k3po cannot send an extension with a rejection")
+ @Test
+ @Configuration("proxy.yaml")
+ @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+ @Specification({
+ "${mqtt}/session.group.reset.invalid.describe.config/client",
+ "${kafka}/session.group.reset.invalid.describe.config/server"})
+ public void shouldGroupStreamReceiveResetInvalidDescribeConfig() throws Exception
+ {
+ k3po.finish();
+ }
@Test
@Configuration("proxy.yaml")
diff --git a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java
index 194d071a67..62e2ac00fc 100644
--- a/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java
+++ b/runtime/binding-mqtt-kafka/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/kafka/internal/stream/MqttKafkaSubscribeProxyIT.java
@@ -152,6 +152,17 @@ public void shouldReceiveOneMessage() throws Exception
k3po.finish();
}
+ @Test
+ @Configuration("proxy.yaml")
+ @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+ @Specification({
+ "${mqtt}/subscribe.one.message/client",
+ "${kafka}/subscribe.one.message.fragmented/server"})
+ public void shouldReceiveOneMessageFragmented() throws Exception
+ {
+ k3po.finish();
+ }
+
@Test
@Configuration("proxy.options.yaml")
@Configure(name = WILL_AVAILABLE_NAME, value = "false")
@@ -251,6 +262,17 @@ public void shouldReceiveRetainedNoRetainAsPublished() throws Exception
k3po.finish();
}
+ @Test
+ @Configuration("proxy.yaml")
+ @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+ @Specification({
+ "${mqtt}/subscribe.retain/client",
+ "${kafka}/subscribe.retain.fragmented/server"})
+ public void shouldReceiveRetainedFragmented() throws Exception
+ {
+ k3po.finish();
+ }
+
@Test
@Configuration("proxy.yaml")
@Configure(name = WILL_AVAILABLE_NAME, value = "false")
@@ -580,4 +602,26 @@ public void shouldReplayRetainedQos2() throws Exception
{
k3po.finish();
}
+
+ @Test
+ @Configuration("proxy.yaml")
+ @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+ @Specification({
+ "${mqtt}/subscribe.expire.message/client",
+ "${kafka}/subscribe.expire.message/server"})
+ public void shouldExpireMessage() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Configuration("proxy.yaml")
+ @Configure(name = WILL_AVAILABLE_NAME, value = "false")
+ @Specification({
+ "${mqtt}/subscribe.expire.message/client",
+ "${kafka}/subscribe.expire.message.fragmented/server"})
+ public void shouldExpireMessageFragmented() throws Exception
+ {
+ k3po.finish();
+ }
}
diff --git a/runtime/binding-mqtt/pom.xml b/runtime/binding-mqtt/pom.xml
index 54be6bb4c8..c3008ed623 100644
--- a/runtime/binding-mqtt/pom.xml
+++ b/runtime/binding-mqtt/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java
index 97a49edfe1..242658a950 100644
--- a/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java
+++ b/runtime/binding-mqtt/src/main/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/MqttServerFactory.java
@@ -2893,19 +2893,18 @@ private int onDecodeConnectWillMessage(
reasonCode = RETAIN_NOT_SUPPORTED;
break decode;
}
- payload.willRetain = (byte) RETAIN_FLAG;
}
- if (payload.willQos > maximumQos)
+ final int flags = connectFlags;
+ final int willFlags = decodeWillFlags(flags);
+ final int willQos = decodeWillQos(flags);
+
+ if (willQos > maximumQos)
{
reasonCode = QOS_NOT_SUPPORTED;
break decode;
}
- final int flags = connectFlags;
- final int willFlags = decodeWillFlags(flags);
- final int willQos = decodeWillQos(flags);
-
if (willFlagSet)
{
final MqttDataExFW.Builder sessionDataExBuilder =
@@ -2951,7 +2950,7 @@ private int onDecodeConnectWillMessage(
if (reasonCode != BAD_USER_NAME_OR_PASSWORD)
{
- doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, null, version);
+ doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, false, null, null, version);
}
if (session != null)
@@ -3737,11 +3736,11 @@ private void onDecodeError(
{
if (connected || reasonCode == SESSION_TAKEN_OVER)
{
- doEncodeDisconnect(traceId, authorization, reasonCode, null);
+ doEncodeDisconnect(traceId, authorization, reasonCode, null, null);
}
else
{
- doEncodeConnack(traceId, authorization, reasonCode, false, false, null, version);
+ doEncodeConnack(traceId, authorization, reasonCode, false, false, null, null, version);
}
}
@@ -4206,6 +4205,7 @@ private void doEncodeConnack(
boolean assignedClientId,
boolean sessionPresent,
String16FW serverReference,
+ String16FW reason,
int version)
{
@@ -4215,10 +4215,10 @@ private void doEncodeConnack(
doEncodeConnackV4(traceId, authorization, reasonCode, sessionPresent);
break;
case 5:
- doEncodeConnackV5(traceId, authorization, reasonCode, assignedClientId, sessionPresent, serverReference);
+ doEncodeConnackV5(traceId, authorization, reasonCode, assignedClientId, sessionPresent, serverReference, reason);
break;
default:
- doEncodeConnackV5(traceId, authorization, reasonCode, assignedClientId, sessionPresent, serverReference);
+ doEncodeConnackV5(traceId, authorization, reasonCode, assignedClientId, sessionPresent, serverReference, reason);
break;
}
@@ -4249,7 +4249,8 @@ private void doEncodeConnackV5(
int reasonCode,
boolean assignedClientId,
boolean sessionPresent,
- String16FW serverReference)
+ String16FW serverReference,
+ String16FW reason)
{
int propertiesSize = 0;
@@ -4334,6 +4335,13 @@ private void doEncodeConnackV5(
propertiesSize = mqttProperty.limit();
}
}
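+ // MQTT 5 reason string: a human-readable explanation accompanying the reason code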
+ else if (reason != null && reason.length() != -1)
+ {
+ mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity())
+ .reasonString(reason)
+ .build();
+ propertiesSize = mqttProperty.limit();
+ }
if (serverReference != null)
{
@@ -4450,12 +4458,20 @@ private void doEncodeDisconnect(
long traceId,
long authorization,
int reasonCode,
- String16FW serverReference)
+ String16FW serverReference,
+ String16FW reason)
{
int propertiesSize = 0;
MqttPropertyFW mqttProperty;
- if (serverReference != null)
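+ // an explicit reason string takes precedence over a server reference in DISCONNECT properties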
+ if (reason != null && reason.length() != -1)
+ {
+ mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity())
+ .reasonString(reason)
+ .build();
+ propertiesSize = mqttProperty.limit();
+ }
+ else if (serverReference != null)
{
mqttProperty = mqttPropertyRW.wrap(propertyBuffer, propertiesSize, propertyBuffer.capacity())
.serverReference(serverReference)
@@ -4906,12 +4922,11 @@ private void onSessionReset(
final OctetsFW extension = reset.extension();
final MqttResetExFW mqttResetEx = extension.get(mqttResetExRO::tryWrap);
-
-
if (mqttResetEx != null)
{
String16FW serverRef = mqttResetEx.serverRef();
byte reasonCode = (byte) mqttResetEx.reasonCode();
+ String16FW reason = mqttResetEx.reason();
boolean serverRefExists = serverRef != null && serverRef.asString() != null;
if (reasonCode == SUCCESS)
@@ -4923,13 +4938,14 @@ private void onSessionReset(
{
doCancelConnectTimeout();
doEncodeConnack(traceId, authorization, reasonCode, assignedClientId,
- false, serverRefExists ? serverRef : null, version);
+ false, serverRefExists ? serverRef : null, reason, version);
}
- else
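+ // only MQTT 5 defines a server-sent DISCONNECT packet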
+ else if (version == MQTT_PROTOCOL_VERSION_5)
{
- doEncodeDisconnect(traceId, authorization, reasonCode, serverRefExists ? serverRef : null);
+ doEncodeDisconnect(traceId, authorization, reasonCode, serverRefExists ? serverRef : null, reason);
}
}
+ doNetworkEnd(traceId, authorization);
setInitialClosed();
decodeNetwork(traceId);
cleanupAbort(traceId);
@@ -5020,7 +5036,8 @@ private void onSessionData(
sessionPresent = true;
}
}
- doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, sessionPresent, null, version);
+ doEncodeConnack(traceId, authorization, reasonCode, assignedClientId, sessionPresent,
+ null, null, version);
connected = true;
}
else
@@ -6299,8 +6316,7 @@ private static int decodeWillQos(
int willQos = 0;
if (isSetWillQos(flags))
{
- //TODO shift by 3?
- willQos = (flags & WILL_QOS_MASK) >>> 2;
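+ // the will QoS sits in CONNECT flag bits 3-4, hence the shift by 3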
+ willQos = (flags & WILL_QOS_MASK) >>> 3;
}
return willQos;
}
@@ -6418,8 +6434,6 @@ private final class MqttConnectPayload
{
private byte reasonCode = SUCCESS;
private MqttPropertiesFW willProperties;
- private byte willQos;
- private byte willRetain;
private String16FW willTopic;
private BinaryFW willPayload;
private String16FW username;
@@ -6436,8 +6450,6 @@ private MqttConnectPayload reset()
{
this.reasonCode = SUCCESS;
this.willProperties = null;
- this.willQos = 0;
- this.willRetain = 0;
this.willTopic = null;
this.willPayload = null;
this.username = null;
@@ -6494,12 +6506,6 @@ private int decode(
break;
}
- final byte qos = (byte) ((flags & WILL_QOS_MASK) >>> 3);
- if (qos != 0)
- {
- willQos = (byte) (qos << 1);
- }
-
if (willTopic == null || willTopic.asString().isEmpty())
{
reasonCode = MALFORMED_PACKET;
diff --git a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/SessionIT.java b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/SessionIT.java
index bb5899daa5..b97e80a497 100644
--- a/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/SessionIT.java
+++ b/runtime/binding-mqtt/src/test/java/io/aklivity/zilla/runtime/binding/mqtt/internal/stream/server/v5/SessionIT.java
@@ -244,4 +244,24 @@ public void shouldSubscribeAndPublishToNonDefaultRoute() throws Exception
{
k3po.finish();
}
+
+ @Test
+ @Configuration("server.yaml")
+ @Specification({
+ "${net}/session.invalid.session.timeout.after.connack/client",
+ "${app}/session.invalid.session.timeout.after.connack/server"})
+ public void shouldPropagateMqttReasonCodeAndStringAfterConnack() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Configuration("server.yaml")
+ @Specification({
+ "${net}/session.invalid.session.timeout.before.connack/client",
+ "${app}/session.invalid.session.timeout.before.connack/server"})
+ public void shouldPropagateMqttReasonCodeAndStringBeforeConnack() throws Exception
+ {
+ k3po.finish();
+ }
}
diff --git a/runtime/binding-proxy/pom.xml b/runtime/binding-proxy/pom.xml
index f34697c3c5..db620650da 100644
--- a/runtime/binding-proxy/pom.xml
+++ b/runtime/binding-proxy/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/binding-sse-kafka/pom.xml b/runtime/binding-sse-kafka/pom.xml
index 8e7d5438bd..ffc586be22 100644
--- a/runtime/binding-sse-kafka/pom.xml
+++ b/runtime/binding-sse-kafka/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/binding-sse/pom.xml b/runtime/binding-sse/pom.xml
index f34b55bacc..501ca9ede8 100644
--- a/runtime/binding-sse/pom.xml
+++ b/runtime/binding-sse/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/binding-tcp/pom.xml b/runtime/binding-tcp/pom.xml
index b6b6b2fa78..fbf2fe88d0 100644
--- a/runtime/binding-tcp/pom.xml
+++ b/runtime/binding-tcp/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/binding-tls/pom.xml b/runtime/binding-tls/pom.xml
index e162ca7404..e30dd96e41 100644
--- a/runtime/binding-tls/pom.xml
+++ b/runtime/binding-tls/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/binding-ws/pom.xml b/runtime/binding-ws/pom.xml
index 39029c69bd..8a34b13578 100644
--- a/runtime/binding-ws/pom.xml
+++ b/runtime/binding-ws/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/command-metrics/pom.xml b/runtime/command-metrics/pom.xml
index eec02de8ef..ca105dbd71 100644
--- a/runtime/command-metrics/pom.xml
+++ b/runtime/command-metrics/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/command-start/pom.xml b/runtime/command-start/pom.xml
index f680ce4ccd..ff9b46ce3a 100644
--- a/runtime/command-start/pom.xml
+++ b/runtime/command-start/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/command-stop/pom.xml b/runtime/command-stop/pom.xml
index 7f02d48dc4..19acc10fed 100644
--- a/runtime/command-stop/pom.xml
+++ b/runtime/command-stop/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/command/pom.xml b/runtime/command/pom.xml
index fedd061e83..0be1f5cabc 100644
--- a/runtime/command/pom.xml
+++ b/runtime/command/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/engine/pom.xml b/runtime/engine/pom.xml
index 7a8b933dba..debd07f368 100644
--- a/runtime/engine/pom.xml
+++ b/runtime/engine/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/exporter-prometheus/pom.xml b/runtime/exporter-prometheus/pom.xml
index 25f3fae045..012c1552a0 100644
--- a/runtime/exporter-prometheus/pom.xml
+++ b/runtime/exporter-prometheus/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/guard-jwt/pom.xml b/runtime/guard-jwt/pom.xml
index 1e17c5fbad..f6078d1207 100644
--- a/runtime/guard-jwt/pom.xml
+++ b/runtime/guard-jwt/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/metrics-grpc/pom.xml b/runtime/metrics-grpc/pom.xml
index 168a056254..1f5f52c072 100644
--- a/runtime/metrics-grpc/pom.xml
+++ b/runtime/metrics-grpc/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/metrics-http/pom.xml b/runtime/metrics-http/pom.xml
index 5dd48f02e2..9a7e96e71d 100644
--- a/runtime/metrics-http/pom.xml
+++ b/runtime/metrics-http/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/metrics-stream/pom.xml b/runtime/metrics-stream/pom.xml
index 7895abcbfc..4b6b9fc167 100644
--- a/runtime/metrics-stream/pom.xml
+++ b/runtime/metrics-stream/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/pom.xml b/runtime/pom.xml
index 4be397068f..a6dca95af9 100644
--- a/runtime/pom.xml
+++ b/runtime/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
zilla
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/runtime/vault-filesystem/pom.xml b/runtime/vault-filesystem/pom.xml
index 5d3edd2890..591160e47b 100644
--- a/runtime/vault-filesystem/pom.xml
+++ b/runtime/vault-filesystem/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
runtime
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/specs/binding-echo.spec/pom.xml b/specs/binding-echo.spec/pom.xml
index 1a2fadc9f5..30d6dc0700 100644
--- a/specs/binding-echo.spec/pom.xml
+++ b/specs/binding-echo.spec/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
specs
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/specs/binding-fan.spec/pom.xml b/specs/binding-fan.spec/pom.xml
index 80d821b2a7..8fa4b0f72c 100644
--- a/specs/binding-fan.spec/pom.xml
+++ b/specs/binding-fan.spec/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
specs
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/specs/binding-filesystem.spec/pom.xml b/specs/binding-filesystem.spec/pom.xml
index fc91b7f7a6..010cde4f3f 100644
--- a/specs/binding-filesystem.spec/pom.xml
+++ b/specs/binding-filesystem.spec/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
specs
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/specs/binding-grpc-kafka.spec/pom.xml b/specs/binding-grpc-kafka.spec/pom.xml
index a04b5ef458..ade82b51f8 100644
--- a/specs/binding-grpc-kafka.spec/pom.xml
+++ b/specs/binding-grpc-kafka.spec/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
specs
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/specs/binding-grpc.spec/pom.xml b/specs/binding-grpc.spec/pom.xml
index 3a7c03a1b1..74792dd937 100644
--- a/specs/binding-grpc.spec/pom.xml
+++ b/specs/binding-grpc.spec/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
specs
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/specs/binding-grpc.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/schema/grpc.schema.patch.json b/specs/binding-grpc.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/schema/grpc.schema.patch.json
index 9951d74743..b1b14db7d9 100644
--- a/specs/binding-grpc.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/schema/grpc.schema.patch.json
+++ b/specs/binding-grpc.spec/src/main/scripts/io/aklivity/zilla/specs/binding/grpc/schema/grpc.schema.patch.json
@@ -31,6 +31,7 @@
{
"enum": [ "server", "client"]
},
+ "vault": false,
"options":
{
"properties":
diff --git a/specs/binding-http-filesystem.spec/pom.xml b/specs/binding-http-filesystem.spec/pom.xml
index d4602bef78..765523135d 100644
--- a/specs/binding-http-filesystem.spec/pom.xml
+++ b/specs/binding-http-filesystem.spec/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
specs
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/specs/binding-http-kafka.spec/pom.xml b/specs/binding-http-kafka.spec/pom.xml
index cd5a8b12ae..38bd2726de 100644
--- a/specs/binding-http-kafka.spec/pom.xml
+++ b/specs/binding-http-kafka.spec/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
specs
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/specs/binding-http.spec/pom.xml b/specs/binding-http.spec/pom.xml
index 2007a9f4ba..6839392af9 100644
--- a/specs/binding-http.spec/pom.xml
+++ b/specs/binding-http.spec/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
specs
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/specs/binding-kafka-grpc.spec/pom.xml b/specs/binding-kafka-grpc.spec/pom.xml
index 7c22b1ffe1..5a5b0e154f 100644
--- a/specs/binding-kafka-grpc.spec/pom.xml
+++ b/specs/binding-kafka-grpc.spec/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
specs
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/specs/binding-kafka-grpc.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/grpc/schema/kafka.grpc.schema.patch.json b/specs/binding-kafka-grpc.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/grpc/schema/kafka.grpc.schema.patch.json
index 3fd783ad61..61dc8be8d0 100644
--- a/specs/binding-kafka-grpc.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/grpc/schema/kafka.grpc.schema.patch.json
+++ b/specs/binding-kafka-grpc.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/grpc/schema/kafka.grpc.schema.patch.json
@@ -170,19 +170,19 @@
"type": "string"
}
},
- "additionalProperties": false
- },
- "required":
- [
- "scheme",
- "authority"
- ]
- }
- },
- "required":
- [
- "with"
- ]
+ "additionalProperties": false,
+ "required":
+ [
+ "scheme",
+ "authority"
+ ]
+ }
+ },
+ "required":
+ [
+ "with"
+ ]
+ }
},
"exit": false
},
diff --git a/specs/binding-kafka.spec/pom.xml b/specs/binding-kafka.spec/pom.xml
index 1ff7b729d8..58559fec3d 100644
--- a/specs/binding-kafka.spec/pom.xml
+++ b/specs/binding-kafka.spec/pom.xml
@@ -8,7 +8,7 @@
io.aklivity.zilla
specs
- 0.9.62
+ 0.9.63
../pom.xml
diff --git a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java
index f6c58c2bc7..b49479eeb1 100644
--- a/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java
+++ b/specs/binding-kafka.spec/src/main/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctions.java
@@ -1417,6 +1417,20 @@ public KafkaGroupBeginExBuilder instanceId(
return this;
}
+ public KafkaGroupBeginExBuilder host(
+ String host)
+ {
+ groupBeginExRW.host(host);
+ return this;
+ }
+
+ public KafkaGroupBeginExBuilder port(
+ int port)
+ {
+ groupBeginExRW.port(port);
+ return this;
+ }
+
public KafkaGroupBeginExBuilder timeout(
int timeout)
{
@@ -1514,6 +1528,20 @@ public KafkaOffsetFetchBeginExBuilder groupId(
return this;
}
+ public KafkaOffsetFetchBeginExBuilder host(
+ String host)
+ {
+ offsetFetchBeginExRW.host(host);
+ return this;
+ }
+
+ public KafkaOffsetFetchBeginExBuilder port(
+ int port)
+ {
+ offsetFetchBeginExRW.port(port);
+ return this;
+ }
+
public KafkaOffsetFetchBeginExBuilder topic(
String topic)
{
@@ -5357,6 +5385,8 @@ public final class KafkaGroupBeginExMatcherBuilder
private String16FW groupId;
private String16FW protocol;
private String16FW instanceId;
+ private String16FW host;
+ private Integer port;
private Integer timeout;
private byte[] metadata;
@@ -5393,6 +5423,20 @@ public KafkaGroupBeginExMatcherBuilder instanceId(
return this;
}
+ public KafkaGroupBeginExMatcherBuilder host(
+ String host)
+ {
+ this.host = new String16FW(host);
+ return this;
+ }
+
+ public KafkaGroupBeginExMatcherBuilder port(
+ int port)
+ {
+ this.port = port;
+ return this;
+ }
+
public KafkaGroupBeginExMatcherBuilder metadata(
byte[] metadata)
{
@@ -5413,6 +5457,8 @@ private boolean match(
matchProtocol(groupBeginEx) &&
matchTimeout(groupBeginEx) &&
matchInstanceId(groupBeginEx) &&
+ matchHost(groupBeginEx) &&
+ matchPort(groupBeginEx) &&
matchMetadata(groupBeginEx);
}
@@ -5440,6 +5486,18 @@ private boolean matchInstanceId(
return instanceId == null || instanceId.equals(groupBeginExFW.instanceId());
}
+ private boolean matchHost(
+ final KafkaGroupBeginExFW groupBeginExFW)
+ {
+ return host == null || host.equals(groupBeginExFW.host());
+ }
+
+ private boolean matchPort(
+ final KafkaGroupBeginExFW groupBeginExFW)
+ {
+ return port == null || port == groupBeginExFW.port();
+ }
+
private boolean matchMetadata(
final KafkaGroupBeginExFW groupBeginExFW)
{
diff --git a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl
index ab26e9ccb6..b9bc62e50c 100644
--- a/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl
+++ b/specs/binding-kafka.spec/src/main/resources/META-INF/zilla/kafka.idl
@@ -400,6 +400,8 @@ scope kafka
string16 groupId;
string16 protocol;
string16 instanceId = null;
+ string16 host = null;
+ int32 port = 0;
int32 timeout;
varint32 metadataLen;
octets[metadataLen] metadata = null;
@@ -424,6 +426,8 @@ scope kafka
{
string16 groupId;
string16 consumerId;
+ string16 host = null;
+ int32 port = 0;
int32 timeout;
string16 topic;
KafkaTopicPartition[] partitionIds;
@@ -457,6 +461,8 @@ scope kafka
struct KafkaOffsetFetchBeginEx
{
string16 groupId;
+ string16 host = null;
+ int32 port = 0;
string16 topic;
KafkaTopicPartition[] partitions;
}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json
index f05d23d283..be4715ed50 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json
@@ -77,7 +77,8 @@
"deltaType":
{
"type": "string",
- "enum": [ "none", "json_patch" ]
+ "enum": [ "none", "json_patch" ],
+ "deprecated": true
},
"key":
{
@@ -230,7 +231,8 @@
"groupId":
{
"title": "groupId",
- "type": "string"
+ "type": "string",
+ "deprecated": true
}
},
"additionalProperties": false
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/client.rpt
index 3db9611ec3..bb7635fdba 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/client.rpt
@@ -42,6 +42,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.groupId("client-1")
.protocol("rebalance")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/server.rpt
index 5174434b63..93f0a7753e 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/consumer/commit.acknowledge.message.offset/server.rpt
@@ -46,6 +46,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("client-1")
.protocol("rebalance")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/client.rpt
index 2dc3be52c5..a549d08744 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/client.rpt
@@ -35,6 +35,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/server.rpt
index a293f4dee1..c7cce87c75 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.read.abort.after.sync.group.response/server.rpt
@@ -39,6 +39,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/client.rpt
index 4329b6a927..49d7c86c06 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/client.rpt
@@ -35,6 +35,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/server.rpt
index 1378dc8569..85ebf4994f 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.join.group.response/server.rpt
@@ -39,6 +39,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/client.rpt
index a3cb890870..45761ae379 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/client.rpt
@@ -35,6 +35,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/server.rpt
index e250ecc951..24d8fa5314 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/client.sent.write.abort.after.sync.group.response/server.rpt
@@ -39,6 +39,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt
index 531aae7049..77d1e5a543 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/client.rpt
@@ -36,6 +36,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt
index 4c54c801ea..053771f954 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/ignore.heartbeat.before.handshake/server.rpt
@@ -39,6 +39,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/client.rpt
index b2985823ab..0681f574df 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/client.rpt
@@ -35,6 +35,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/server.rpt
index bde6ec6d0c..190f5d143e 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/leader.assignment/server.rpt
@@ -39,6 +39,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/client.rpt
index 268d5827eb..7b80d69206 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/client.rpt
@@ -35,6 +35,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
@@ -80,6 +82,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/server.rpt
index 847812fbe4..45a9295835 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.multiple.members.with.same.group.id/server.rpt
@@ -39,6 +39,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
@@ -81,6 +83,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/client.rpt
index 9c599cd1dc..f7e8a73b5f 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/client.rpt
@@ -35,6 +35,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/server.rpt
index e31f5270a4..37853f30cd 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.heartbeat.unknown.member/server.rpt
@@ -39,6 +39,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt
index fb16829e78..02b8d2ee7f 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/client.rpt
@@ -43,6 +43,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(45000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt
index dbf1f773d7..9d4a24ccf4 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader.in.parallel/server.rpt
@@ -54,6 +54,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt
index 6b02b9ec46..9d0671ff4d 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/client.rpt
@@ -35,6 +35,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt
index 2527180360..ab1377d3cc 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander.migrate.leader/server.rpt
@@ -39,6 +39,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt
index db1cfbc9db..bb64576edb 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/client.rpt
@@ -35,6 +35,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt
index 4365349c6c..bd287c531f 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.highlander/server.rpt
@@ -39,6 +39,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt
index 38d15fe42a..12932685e8 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/client.rpt
@@ -35,6 +35,8 @@ read zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("unknown")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt
index 8a426fc6aa..797cd3814a 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.protocol.unknown/server.rpt
@@ -39,6 +39,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("unknown")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt
index 1f64ba2095..bed3ada860 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/client.rpt
@@ -35,6 +35,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt
index b0ccda7c59..a6a033eee2 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/group/rebalance.sync.group/server.rpt
@@ -39,6 +39,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("test")
.protocol("highlander")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/client.rpt
index 608dc73d7a..5bf4738102 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/client.rpt
@@ -135,6 +135,8 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.group()
.groupId("client-1")
.protocol("rebalance")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
@@ -193,6 +195,8 @@ write zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.offsetFetch()
.groupId("client-1")
+ .host("localhost")
+ .port(9092)
.topic("test")
.partition(0)
.build()
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/server.rpt
index 71b5c0264b..07c865357d 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/merged/unmerged.group.fetch.message.ack/server.rpt
@@ -134,6 +134,8 @@ write zilla:begin.ext ${kafka:beginEx()
.groupId("client-1")
.protocol("rebalance")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(30000)
.build()
.build()}
@@ -188,6 +190,8 @@ read zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.offsetFetch()
.groupId("client-1")
+ .host("localhost")
+ .port(9092)
.topic("test")
.partition(0)
.build()
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/client.rpt
index c0b4da2a16..726e94ed6d 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/client.rpt
@@ -22,6 +22,8 @@ write zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.offsetFetch()
.groupId("client-1")
+ .host("localhost")
+ .port(9092)
.topic("test")
.partition(0)
.build()
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/server.rpt
index eabe9ae2d4..a750c62abc 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info.incomplete/server.rpt
@@ -26,6 +26,8 @@ read zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.offsetFetch()
.groupId("client-1")
+ .host("localhost")
+ .port(9092)
.topic("test")
.partition(0)
.build()
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/client.rpt
index 822de76544..9f8784881c 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/client.rpt
@@ -22,6 +22,8 @@ write zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.offsetFetch()
.groupId("client-1")
+ .host("localhost")
+ .port(9092)
.topic("test")
.partition(0)
.build()
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/server.rpt
index ae5f883176..ee6d437f1e 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.info/server.rpt
@@ -26,6 +26,8 @@ read zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.offsetFetch()
.groupId("client-1")
+ .host("localhost")
+ .port(9092)
.topic("test")
.partition(0)
.build()
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/client.rpt
index 3872fd9934..7bb6c2991f 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/client.rpt
@@ -22,6 +22,8 @@ write zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.offsetFetch()
.groupId("client-1")
+ .host("localhost")
+ .port(9092)
.topic("test")
.partition(0)
.build()
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/server.rpt
index 0b19ef32cf..672d3870e4 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/offset.fetch/topic.offset.no.partition/server.rpt
@@ -26,6 +26,8 @@ read zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.offsetFetch()
.groupId("client-1")
+ .host("localhost")
+ .port(9092)
.topic("test")
.partition(0)
.build()
diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java
index 9b29fff00c..4e8c7318ae 100644
--- a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java
+++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/internal/KafkaFunctionsTest.java
@@ -4167,6 +4167,8 @@ public void shouldGenerateGroupBeginExtension()
.groupId("test")
.protocol("roundrobin")
.instanceId("client-1")
+ .host("localhost")
+ .port(9092)
.timeout(10)
.metadata("test".getBytes())
.build()
@@ -4180,6 +4182,9 @@ public void shouldGenerateGroupBeginExtension()
final KafkaGroupBeginExFW groupBeginEx = beginEx.group();
assertEquals("test", groupBeginEx.groupId().asString());
assertEquals("roundrobin", groupBeginEx.protocol().asString());
+ assertEquals("client-1", groupBeginEx.instanceId().asString());
+ assertEquals("localhost", groupBeginEx.host().asString());
+ assertEquals(9092, groupBeginEx.port());
assertEquals(10, groupBeginEx.timeout());
}
@@ -4238,6 +4243,8 @@ public void shouldGenerateOffsetFetchBeginExtension()
.typeId(0x01)
.offsetFetch()
.groupId("test")
+ .host("localhost")
+ .port(9092)
.topic("topic")
.partition(0)
.build()
@@ -4250,6 +4257,8 @@ public void shouldGenerateOffsetFetchBeginExtension()
final KafkaOffsetFetchBeginExFW offsetFetchBeginEx = beginEx.offsetFetch();
assertEquals("topic", offsetFetchBeginEx.topic().asString());
+ assertEquals("localhost", offsetFetchBeginEx.host().asString());
+ assertEquals(9092, offsetFetchBeginEx.port());
assertEquals(1, offsetFetchBeginEx.partitions().fieldCount());
}
@@ -4285,6 +4294,8 @@ public void shouldMatchGroupBeginExtension() throws Exception
.groupId("test")
.protocol("roundrobin")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(10)
.metadata("meta".getBytes())
.build()
@@ -4299,6 +4310,8 @@ public void shouldMatchGroupBeginExtension() throws Exception
.groupId("test")
.protocol("roundrobin")
.instanceId("zilla")
+ .host("localhost")
+ .port(9092)
.timeout(10)
.metadataLen("meta".length())
.metadata(m -> m.set("test".getBytes())))
diff --git a/specs/binding-mqtt-kafka.spec/pom.xml b/specs/binding-mqtt-kafka.spec/pom.xml
index 0bcdb84f01..59033c25e9 100644
--- a/specs/binding-mqtt-kafka.spec/pom.xml
+++ b/specs/binding-mqtt-kafka.spec/pom.xml
@@ -8,7 +8,7 @@
<parent>
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
-    <version>0.9.62</version>
+    <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
</parent>
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json
index 3bc637ff87..716f9584e4 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/schema/mqtt.kafka.schema.patch.json
@@ -109,11 +109,15 @@
"type": "array",
"items":
{
- "topic":
+ "properties":
{
- "title": "Topic",
- "type": "string"
- }
+ "topic":
+ {
+ "title": "Topic",
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
}
}
}
@@ -123,15 +127,19 @@
{
"publish":
{
- "title": "Subscribe",
+ "title": "Publish",
"type": "array",
"items":
{
- "topic":
+ "properties":
{
- "title": "Topic",
- "type": "string"
- }
+ "topic":
+ {
+ "title": "Topic",
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
}
}
}
@@ -141,18 +149,15 @@
},
"with":
{
- "items":
+ "properties":
{
- "properties":
+ "messages":
{
- "messages":
- {
- "title": "Messages Topic",
- "type": "string"
- }
- },
- "additionalProperties": false
- }
+ "title": "Messages Topic",
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
}
},
"required":
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt
index 7af4769612..6faa447bb7 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/client.rpt
@@ -39,7 +39,7 @@ write zilla:data.ext ${kafka:dataEx()
.header("zilla:filter", "sensor")
.header("zilla:filter", "one")
.header("zilla:local", "client")
- .headerInt("zilla:timeout-ms", 15000)
+ .headerInt("zilla:expiry", 15)
.header("zilla:content-type", "message")
.header("zilla:format", "TEXT")
.header("zilla:reply-to", "messages")
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt
index 6a6cc098b0..a9653e2d70 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message.changed.topic.name/server.rpt
@@ -42,7 +42,7 @@ read zilla:data.ext ${kafka:matchDataEx()
.header("zilla:filter", "sensor")
.header("zilla:filter", "one")
.header("zilla:local", "client")
- .headerInt("zilla:timeout-ms", 15000)
+ .headerInt("zilla:expiry", 15)
.header("zilla:content-type", "message")
.header("zilla:format", "TEXT")
.header("zilla:reply-to", "messages")
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt
index 49f76cf963..66637714da 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/client.rpt
@@ -39,7 +39,7 @@ write zilla:data.ext ${kafka:dataEx()
.header("zilla:filter", "sensor")
.header("zilla:filter", "one")
.header("zilla:local", "client")
- .headerInt("zilla:timeout-ms", 15000)
+ .headerInt("zilla:expiry", 15)
.header("zilla:content-type", "message")
.header("zilla:format", "TEXT")
.header("zilla:reply-to", "mqtt-messages")
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt
index 82b379a218..468e8b279f 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/publish.one.message/server.rpt
@@ -42,7 +42,7 @@ read zilla:data.ext ${kafka:matchDataEx()
.header("zilla:filter", "sensor")
.header("zilla:filter", "one")
.header("zilla:local", "client")
- .headerInt("zilla:timeout-ms", 15000)
+ .headerInt("zilla:expiry", 15)
.header("zilla:content-type", "message")
.header("zilla:format", "TEXT")
.header("zilla:reply-to", "mqtt-messages")
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.invalid.describe.config/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.invalid.describe.config/client.rpt
new file mode 100644
index 0000000000..1dbf3d9fd4
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.invalid.describe.config/client.rpt
@@ -0,0 +1,72 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("PRODUCE_AND_FETCH")
+ .topic("mqtt-sessions")
+ .groupId("mqtt-clients")
+ .filter()
+ .key("client-1#migrate")
+ .headerNot("sender-id", "sender-1")
+ .build()
+ .build()
+ .build()}
+
+connected
+
+write zilla:data.ext ${kafka:dataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .produce()
+ .deferred(0)
+ .partition(-1, -1)
+ .key("client-1#migrate")
+ .hashKey("client-1")
+ .header("sender-id", "sender-1")
+ .build()
+ .build()}
+write zilla:data.empty
+write flush
+write notify SENT_INIT_MIGRATE
+
+
+connect await SENT_INIT_MIGRATE
+ "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .group()
+ .groupId("client-1-session")
+ .protocol("highlander")
+ .timeout(1000)
+ .build()
+ .build()}
+
+connected
+
+read zilla:reset.ext ${kafka:resetEx()
+ .typeId(zilla:id("kafka"))
+ .error(35)
+ .build()}
+
+write aborted
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.invalid.describe.config/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.invalid.describe.config/server.rpt
new file mode 100644
index 0000000000..9ac10dd099
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.invalid.describe.config/server.rpt
@@ -0,0 +1,70 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("PRODUCE_AND_FETCH")
+ .topic("mqtt-sessions")
+ .groupId("mqtt-clients")
+ .filter()
+ .key("client-1#migrate")
+ .headerNot("sender-id", "sender-1")
+ .build()
+ .build()
+ .build()}
+
+connected
+
+read zilla:data.ext ${kafka:matchDataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .produce()
+ .deferred(0)
+ .partition(-1, -1)
+ .key("client-1#migrate")
+ .hashKey("client-1")
+ .header("sender-id", "sender-1")
+ .build()
+ .build()}
+read zilla:data.empty
+
+
+
+accepted
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .group()
+ .groupId("client-1-session")
+ .protocol("highlander")
+ .timeout(1000)
+ .build()
+ .build()}
+
+connected
+
+write zilla:reset.ext ${kafka:resetEx()
+ .typeId(zilla:id("kafka"))
+ .error(35)
+ .build()}
+
+read abort
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.invalid.session.timeout/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.invalid.session.timeout/client.rpt
new file mode 100644
index 0000000000..bd9cc67716
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.invalid.session.timeout/client.rpt
@@ -0,0 +1,72 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("PRODUCE_AND_FETCH")
+ .topic("mqtt-sessions")
+ .groupId("mqtt-clients")
+ .filter()
+ .key("client-1#migrate")
+ .headerNot("sender-id", "sender-1")
+ .build()
+ .build()
+ .build()}
+
+connected
+
+write zilla:data.ext ${kafka:dataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .produce()
+ .deferred(0)
+ .partition(-1, -1)
+ .key("client-1#migrate")
+ .hashKey("client-1")
+ .header("sender-id", "sender-1")
+ .build()
+ .build()}
+write zilla:data.empty
+write flush
+write notify SENT_INIT_MIGRATE
+
+
+connect await SENT_INIT_MIGRATE
+ "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .group()
+ .groupId("client-1-session")
+ .protocol("highlander")
+ .timeout(1000)
+ .build()
+ .build()}
+
+connected
+
+read zilla:reset.ext ${kafka:resetEx()
+ .typeId(zilla:id("kafka"))
+ .error(26)
+ .build()}
+
+write aborted
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.invalid.session.timeout/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.invalid.session.timeout/server.rpt
new file mode 100644
index 0000000000..81bea19f30
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.invalid.session.timeout/server.rpt
@@ -0,0 +1,70 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("PRODUCE_AND_FETCH")
+ .topic("mqtt-sessions")
+ .groupId("mqtt-clients")
+ .filter()
+ .key("client-1#migrate")
+ .headerNot("sender-id", "sender-1")
+ .build()
+ .build()
+ .build()}
+
+connected
+
+read zilla:data.ext ${kafka:matchDataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .produce()
+ .deferred(0)
+ .partition(-1, -1)
+ .key("client-1#migrate")
+ .hashKey("client-1")
+ .header("sender-id", "sender-1")
+ .build()
+ .build()}
+read zilla:data.empty
+
+
+
+accepted
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .group()
+ .groupId("client-1-session")
+ .protocol("highlander")
+ .timeout(1000)
+ .build()
+ .build()}
+
+connected
+
+write zilla:reset.ext ${kafka:resetEx()
+ .typeId(zilla:id("kafka"))
+ .error(26)
+ .build()}
+
+read abort
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.not.authorized/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.not.authorized/client.rpt
new file mode 100644
index 0000000000..09c1567649
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.not.authorized/client.rpt
@@ -0,0 +1,72 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("PRODUCE_AND_FETCH")
+ .topic("mqtt-sessions")
+ .groupId("mqtt-clients")
+ .filter()
+ .key("client-1#migrate")
+ .headerNot("sender-id", "sender-1")
+ .build()
+ .build()
+ .build()}
+
+connected
+
+write zilla:data.ext ${kafka:dataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .produce()
+ .deferred(0)
+ .partition(-1, -1)
+ .key("client-1#migrate")
+ .hashKey("client-1")
+ .header("sender-id", "sender-1")
+ .build()
+ .build()}
+write zilla:data.empty
+write flush
+write notify SENT_INIT_MIGRATE
+
+
+connect await SENT_INIT_MIGRATE
+ "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .group()
+ .groupId("client-1-session")
+ .protocol("highlander")
+ .timeout(1000)
+ .build()
+ .build()}
+
+connected
+
+read zilla:reset.ext ${kafka:resetEx()
+ .typeId(zilla:id("kafka"))
+ .error(30)
+ .build()}
+
+write aborted
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.not.authorized/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.not.authorized/server.rpt
new file mode 100644
index 0000000000..edb6577317
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.group.reset.not.authorized/server.rpt
@@ -0,0 +1,70 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("PRODUCE_AND_FETCH")
+ .topic("mqtt-sessions")
+ .groupId("mqtt-clients")
+ .filter()
+ .key("client-1#migrate")
+ .headerNot("sender-id", "sender-1")
+ .build()
+ .build()
+ .build()}
+
+connected
+
+read zilla:data.ext ${kafka:matchDataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .produce()
+ .deferred(0)
+ .partition(-1, -1)
+ .key("client-1#migrate")
+ .hashKey("client-1")
+ .header("sender-id", "sender-1")
+ .build()
+ .build()}
+read zilla:data.empty
+
+
+
+accepted
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .group()
+ .groupId("client-1-session")
+ .protocol("highlander")
+ .timeout(1000)
+ .build()
+ .build()}
+
+connected
+
+write zilla:reset.ext ${kafka:resetEx()
+ .typeId(zilla:id("kafka"))
+ .error(30)
+ .build()}
+
+read abort
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt
index faaec76f1d..e602fe552f 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/client.rpt
@@ -462,9 +462,9 @@ write zilla:data.ext ${kafka:dataEx()
.key("obituaries")
.header("zilla:filter", "obituaries")
.header("zilla:format", "TEXT")
+ .header("zilla:qos", "0")
.build()
.build()}
-
write "client-1 disconnected abruptly"
write flush
@@ -498,6 +498,7 @@ write zilla:data.ext ${kafka:dataEx()
.key("obituaries")
.header("zilla:filter", "obituaries")
.header("zilla:format", "TEXT")
+ .header("zilla:qos", "0")
.build()
.build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt
index bae6ac1947..280c8eac75 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will.retain/server.rpt
@@ -463,6 +463,7 @@ read zilla:data.ext ${kafka:matchDataEx()
.key("obituaries")
.header("zilla:filter", "obituaries")
.header("zilla:format", "TEXT")
+ .header("zilla:qos", "0")
.build()
.build()}
read "client-1 disconnected abruptly"
@@ -495,6 +496,7 @@ read zilla:data.ext ${kafka:matchDataEx()
.key("obituaries")
.header("zilla:filter", "obituaries")
.header("zilla:format", "TEXT")
+ .header("zilla:qos", "0")
.build()
.build()}
read "client-1 disconnected abruptly"
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt
index cb20e66539..b237afd9f6 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/client.rpt
@@ -295,7 +295,7 @@ write zilla:data.ext ${kafka:dataEx()
write ${mqtt:will()
.topic("obituaries")
.delay(1000)
- .expiryInterval(15000)
+ .expiryInterval(15)
.format("TEXT")
.responseTopic("responses/client1")
.lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1")
@@ -427,7 +427,7 @@ read zilla:data.ext ${kafka:matchDataEx()
read ${mqtt:will()
.topic("obituaries")
.delay(1000)
- .expiryInterval(15000)
+ .expiryInterval(15)
.format("TEXT")
.responseTopic("responses/client1")
.lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1")
@@ -467,13 +467,14 @@ write zilla:data.ext ${kafka:dataEx()
.partition(-1, -1)
.key("obituaries")
.header("zilla:filter", "obituaries")
- .headerInt("zilla:timeout-ms", 15000)
+ .headerInt("zilla:expiry", 15)
.header("zilla:format", "TEXT")
.header("zilla:reply-to", "mqtt-messages")
.header("zilla:reply-key", "responses/client1")
.header("zilla:reply-filter", "responses")
.header("zilla:reply-filter", "client1")
.header("zilla:correlation-id", "info")
+ .header("zilla:qos", "0")
.build()
.build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt
index 68a08801fc..206d00b608 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.abort.deliver.will/server.rpt
@@ -298,7 +298,7 @@ read zilla:data.ext ${kafka:matchDataEx()
read ${mqtt:will()
.topic("obituaries")
.delay(1000)
- .expiryInterval(15000)
+ .expiryInterval(15)
.format("TEXT")
.responseTopic("responses/client1")
.lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1")
@@ -428,7 +428,7 @@ write zilla:data.ext ${kafka:dataEx()
write ${mqtt:will()
.topic("obituaries")
.delay(1000)
- .expiryInterval(15000)
+ .expiryInterval(15)
.format("TEXT")
.responseTopic("responses/client1")
.lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1")
@@ -466,13 +466,14 @@ read zilla:data.ext ${kafka:matchDataEx()
.partition(-1, -1)
.key("obituaries")
.header("zilla:filter", "obituaries")
- .headerInt("zilla:timeout-ms", 15000)
+ .headerInt("zilla:expiry", 15)
.header("zilla:format", "TEXT")
.header("zilla:reply-to", "mqtt-messages")
.header("zilla:reply-key", "responses/client1")
.header("zilla:reply-filter", "responses")
.header("zilla:reply-filter", "client1")
.header("zilla:correlation-id", "info")
+ .header("zilla:qos", "0")
.build()
.build()}
read "client-1 disconnected abruptly"
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt
index 1d8e0ba4ec..39cef5e1a5 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/client.rpt
@@ -460,6 +460,7 @@ write zilla:data.ext ${kafka:dataEx()
.key("obituaries")
.header("zilla:filter", "obituaries")
.header("zilla:format", "TEXT")
+ .header("zilla:qos", "0")
.build()
.build()}
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt
index 3a75ce6250..41d9f2f33c 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.takeover.deliver.will/server.rpt
@@ -466,6 +466,7 @@ read zilla:data.ext ${kafka:matchDataEx()
.key("obituaries")
.header("zilla:filter", "obituaries")
.header("zilla:format", "TEXT")
+ .header("zilla:qos", "0")
.build()
.build()}
read "client-1 disconnected abruptly"
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt
index 03006dbc80..e558c9c0a8 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/client.rpt
@@ -269,7 +269,7 @@ write zilla:data.ext ${kafka:dataEx()
write ${mqtt:will()
.topic("obituaries")
.delay(1000)
- .expiryInterval(15000)
+ .expiryInterval(15)
.format("TEXT")
.responseTopic("responses/client1")
.lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1")
@@ -399,7 +399,7 @@ read zilla:data.ext ${kafka:matchDataEx()
read ${mqtt:will()
.topic("obituaries")
.delay(1000)
- .expiryInterval(15000)
+ .expiryInterval(15)
.format("TEXT")
.responseTopic("responses/client1")
.lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1")
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt
index 5c0d7a4597..5ae6d8c246 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/session.will.message.will.id.mismatch.skip.delivery/server.rpt
@@ -269,7 +269,7 @@ read zilla:data.ext ${kafka:matchDataEx()
read ${mqtt:will()
.topic("obituaries")
.delay(1000)
- .expiryInterval(15000)
+ .expiryInterval(15)
.format("TEXT")
.responseTopic("responses/client1")
.lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1")
@@ -399,7 +399,7 @@ write zilla:data.ext ${kafka:dataEx()
write ${mqtt:will()
.topic("obituaries")
.delay(1000)
- .expiryInterval(15000)
+ .expiryInterval(15)
.format("TEXT")
.responseTopic("responses/client1")
.lifetimeId("1e6a1eb5-810a-459d-a12c-a6fa08f228d1")
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.expire.message.fragmented/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.expire.message.fragmented/client.rpt
new file mode 100644
index 0000000000..079b44646c
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.expire.message.fragmented/client.rpt
@@ -0,0 +1,70 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-messages")
+ .filter()
+ .headers("zilla:filter")
+ .sequence("sensor")
+ .sequence("one")
+ .build()
+ .headerNot("zilla:qos", "1")
+ .headerNot("zilla:qos", "2")
+ .build()
+ .evaluation("EAGER")
+ .build()
+ .build()}
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-messages")
+ .partition(0, 0, 1, 1)
+ .build()
+ .build()}
+
+connected
+
+read zilla:data.ext ${kafka:matchDataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .fetch()
+ .timestamp(timestamp)
+ .filters(1)
+ .partition(0, 2, 2)
+ .progress(0, 3)
+ .progress(1, 1)
+ .key("sensor/one")
+ .header("zilla:filter", "sensor")
+ .header("zilla:filter", "one")
+ .header("zilla:local", "client")
+ .headerInt("zilla:expiry", 1)
+ .header("zilla:format", "TEXT")
+ .build()
+ .build()}
+read "mess"
+
+read "age"
+
+write close
+read closed
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.expire.message.fragmented/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.expire.message.fragmented/server.rpt
new file mode 100644
index 0000000000..5887c752ea
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.expire.message.fragmented/server.rpt
@@ -0,0 +1,79 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+property deltaMillis 1000L
+property timestamp ${kafka:timestamp() - deltaMillis}
+
+accept "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-messages")
+ .filter()
+ .headers("zilla:filter")
+ .sequence("sensor")
+ .sequence("one")
+ .build()
+ .headerNot("zilla:qos", "1")
+ .headerNot("zilla:qos", "2")
+ .build()
+ .evaluation("EAGER")
+ .build()
+ .build()}
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-messages")
+ .partition(0, 0, 1, 1)
+ .build()
+ .build()}
+
+connected
+
+write option zilla:flags "init"
+write zilla:data.ext ${kafka:dataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .fetch()
+ .timestamp(timestamp)
+ .filters(1)
+ .partition(0, 2, 2)
+ .progress(0, 3)
+ .progress(1, 1)
+ .key("sensor/one")
+ .header("zilla:filter", "sensor")
+ .header("zilla:filter", "one")
+ .header("zilla:local", "client")
+ .headerInt("zilla:expiry", 1)
+ .header("zilla:format", "TEXT")
+ .build()
+ .build()}
+write "mess"
+write flush
+
+write option zilla:flags "fin"
+write "age"
+write flush
+
+read closed
+write close
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.expire.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.expire.message/client.rpt
new file mode 100644
index 0000000000..3a2100ce41
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.expire.message/client.rpt
@@ -0,0 +1,69 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-messages")
+ .filter()
+ .headers("zilla:filter")
+ .sequence("sensor")
+ .sequence("one")
+ .build()
+ .headerNot("zilla:qos", "1")
+ .headerNot("zilla:qos", "2")
+ .build()
+ .evaluation("EAGER")
+ .build()
+ .build()}
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-messages")
+ .partition(0, 0, 1, 1)
+ .build()
+ .build()}
+
+connected
+
+
+read zilla:data.ext ${kafka:matchDataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .fetch()
+ .timestamp(timestamp)
+ .filters(1)
+ .partition(0, 2, 2)
+ .progress(0, 3)
+ .progress(1, 1)
+ .key("sensor/one")
+ .header("zilla:filter", "sensor")
+ .header("zilla:filter", "one")
+ .header("zilla:local", "client")
+ .headerInt("zilla:expiry", 1)
+ .header("zilla:format", "TEXT")
+ .build()
+ .build()}
+read "message"
+
+write close
+read closed
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.expire.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.expire.message/server.rpt
new file mode 100644
index 0000000000..ef0389a20e
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.expire.message/server.rpt
@@ -0,0 +1,74 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+property deltaMillis 1000L
+property timestamp ${kafka:timestamp() - deltaMillis}
+
+accept "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-messages")
+ .filter()
+ .headers("zilla:filter")
+ .sequence("sensor")
+ .sequence("one")
+ .build()
+ .headerNot("zilla:qos", "1")
+ .headerNot("zilla:qos", "2")
+ .build()
+ .evaluation("EAGER")
+ .build()
+ .build()}
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-messages")
+ .partition(0, 0, 1, 1)
+ .build()
+ .build()}
+
+connected
+
+write zilla:data.ext ${kafka:dataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .fetch()
+ .timestamp(timestamp)
+ .filters(1)
+ .partition(0, 2, 2)
+ .progress(0, 3)
+ .progress(1, 1)
+ .key("sensor/one")
+ .header("zilla:filter", "sensor")
+ .header("zilla:filter", "one")
+ .header("zilla:local", "client")
+ .headerInt("zilla:expiry", 1)
+ .header("zilla:format", "TEXT")
+ .build()
+ .build()}
+write "message"
+write flush
+
+read closed
+write close
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.fragmented/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.fragmented/client.rpt
new file mode 100644
index 0000000000..cb8138e477
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.fragmented/client.rpt
@@ -0,0 +1,55 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-messages")
+ .filter()
+ .headers("zilla:filter")
+ .sequence("sensor")
+ .sequence("one")
+ .build()
+ .build()
+ .evaluation("EAGER")
+ .build()
+ .build()}
+
+connected
+
+read zilla:data.ext ${kafka:matchDataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .fetch()
+ .filters(1)
+ .partition(0, 1, 2)
+ .progress(0, 2)
+ .progress(1, 1)
+ .key("sensor/one")
+ .header("zilla:filter", "sensor")
+ .header("zilla:filter", "one")
+ .header("zilla:local", "client")
+ .header("zilla:format", "TEXT")
+ .build()
+ .build()}
+
+read "mess"
+
+read "age"
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.fragmented/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.fragmented/server.rpt
new file mode 100644
index 0000000000..b70fdeac42
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.fragmented/server.rpt
@@ -0,0 +1,61 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-messages")
+ .filter()
+ .headers("zilla:filter")
+ .sequence("sensor")
+ .sequence("one")
+ .build()
+ .build()
+ .evaluation("EAGER")
+ .build()
+ .build()}
+
+connected
+
+write option zilla:flags "init"
+write zilla:data.ext ${kafka:dataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .fetch()
+ .timestamp(kafka:timestamp())
+ .filters(1)
+ .partition(0, 1, 2)
+ .progress(0, 2)
+ .progress(1, 1)
+ .key("sensor/one")
+ .header("zilla:filter", "sensor")
+ .header("zilla:filter", "one")
+ .header("zilla:local", "client")
+ .header("zilla:format", "TEXT")
+ .build()
+ .build()}
+write "mess"
+write flush
+
+write option zilla:flags "fin"
+write "age"
+write flush
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt
index de8fbc0dc3..1b56acd62d 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt
@@ -46,7 +46,6 @@ read zilla:data.ext ${kafka:matchDataEx()
.header("zilla:filter", "sensor")
.header("zilla:filter", "one")
.header("zilla:local", "client")
- .headerInt("zilla:timeout-ms", 15000)
.header("zilla:content-type", "message")
.header("zilla:format", "TEXT")
.header("zilla:reply-to", "sensor/one")
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt
index 9742cf9769..ec25d52edd 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt
@@ -49,7 +49,6 @@ write zilla:data.ext ${kafka:dataEx()
.header("zilla:filter", "sensor")
.header("zilla:filter", "one")
.header("zilla:local", "client")
- .headerInt("zilla:timeout-ms", 15000)
.header("zilla:content-type", "message")
.header("zilla:format", "TEXT")
.header("zilla:reply-to", "sensor/one")
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain.fragmented/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain.fragmented/client.rpt
new file mode 100644
index 0000000000..b6624d3f9a
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain.fragmented/client.rpt
@@ -0,0 +1,103 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-retained")
+ .filter()
+ .headers("zilla:filter")
+ .sequence("sensor")
+ .sequence("one")
+ .build()
+ .build()
+ .evaluation("EAGER")
+ .build()
+ .build()}
+
+connected
+
+
+read zilla:data.ext ${kafka:matchDataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .fetch()
+ .filters(1)
+ .partition(0, 1, 2)
+ .progress(0, 2)
+ .progress(1, 1)
+ .key("sensor/one")
+ .header("zilla:filter", "sensor")
+ .header("zilla:filter", "one")
+ .header("zilla:local", "client")
+ .header("zilla:format", "TEXT")
+ .build()
+ .build()}
+
+read "mess"
+
+read "age"
+
+read advised zilla:flush
+
+write close
+read closed
+
+write notify RETAINED_FINISHED
+
+connect await RETAINED_FINISHED
+ "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${kafka:beginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-messages")
+ .filter()
+ .headers("zilla:filter")
+ .sequence("sensor")
+ .sequence("one")
+ .build()
+ .build()
+ .evaluation("EAGER")
+ .build()
+ .build()}
+
+connected
+
+read zilla:data.ext ${kafka:matchDataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .fetch()
+ .filters(1)
+ .partition(0, 1, 2)
+ .progress(0, 2)
+ .progress(1, 1)
+ .key("sensor/one")
+ .header("zilla:filter", "sensor")
+ .header("zilla:filter", "one")
+ .header("zilla:local", "client")
+ .header("zilla:format", "TEXT")
+ .build()
+ .build()}
+
+read "message2"
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain.fragmented/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain.fragmented/server.rpt
new file mode 100644
index 0000000000..88749b722e
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/kafka/subscribe.retain.fragmented/server.rpt
@@ -0,0 +1,107 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/kafka0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-retained")
+ .filter()
+ .headers("zilla:filter")
+ .sequence("sensor")
+ .sequence("one")
+ .build()
+ .build()
+ .evaluation("EAGER")
+ .build()
+ .build()}
+
+connected
+
+write option zilla:flags "init"
+write zilla:data.ext ${kafka:dataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .fetch()
+ .timestamp(kafka:timestamp())
+ .filters(1)
+ .partition(0, 1, 2)
+ .progress(0, 2)
+ .progress(1, 1)
+ .key("sensor/one")
+ .header("zilla:filter", "sensor")
+ .header("zilla:filter", "one")
+ .header("zilla:local", "client")
+ .header("zilla:format", "TEXT")
+ .build()
+ .build()}
+
+write "mess"
+write flush
+
+write option zilla:flags "fin"
+write "age"
+write flush
+
+write advise zilla:flush
+
+read closed
+write close
+
+accepted
+
+read zilla:begin.ext ${kafka:matchBeginEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .capabilities("FETCH_ONLY")
+ .topic("mqtt-messages")
+ .filter()
+ .headers("zilla:filter")
+ .sequence("sensor")
+ .sequence("one")
+ .build()
+ .build()
+ .evaluation("EAGER")
+ .build()
+ .build()}
+
+connected
+
+write zilla:data.ext ${kafka:dataEx()
+ .typeId(zilla:id("kafka"))
+ .merged()
+ .fetch()
+ .timestamp(kafka:timestamp())
+ .filters(1)
+ .partition(0, 1, 2)
+ .progress(0, 2)
+ .progress(1, 1)
+ .key("sensor/one")
+ .header("zilla:filter", "sensor")
+ .header("zilla:filter", "one")
+ .header("zilla:local", "client")
+ .header("zilla:format", "TEXT")
+ .build()
+ .build()}
+
+write "message2"
+write flush
+
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/client.rpt
new file mode 100644
index 0000000000..84947bb6d0
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/client.rpt
@@ -0,0 +1,34 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/mqtt0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .expiry(1)
+ .clientId("client-1")
+ .build()
+ .build()}
+
+read zilla:reset.ext ${mqtt:resetEx()
+ .typeId(zilla:id("mqtt"))
+ .reasonCode(131)
+ .build()}
+connect aborted
+
+
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/server.rpt
new file mode 100644
index 0000000000..798d86209c
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.describe.config/server.rpt
@@ -0,0 +1,34 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/mqtt0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .expiry(1)
+ .clientId("client-1")
+ .build()
+ .build()}
+
+write zilla:reset.ext ${mqtt:resetEx()
+ .typeId(zilla:id("mqtt"))
+ .reasonCode(131)
+ .build()}
+rejected
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/client.rpt
new file mode 100644
index 0000000000..0fa7aebe20
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/client.rpt
@@ -0,0 +1,35 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/mqtt0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .expiry(1)
+ .clientId("client-1")
+ .build()
+ .build()}
+
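+# As in the describe.config case, but here the reset also carries a
+# human-readable reason string for the mqtt binding to relay.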
+read zilla:reset.ext ${mqtt:resetEx()
+ .typeId(zilla:id("mqtt"))
+ .reasonCode(131)
+ .reason("Invalid session expiry interval")
+ .build()}
+connect aborted
+
+
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/server.rpt
new file mode 100644
index 0000000000..3b1335a150
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.invalid.session.timeout/server.rpt
@@ -0,0 +1,35 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/mqtt0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .expiry(1)
+ .clientId("client-1")
+ .build()
+ .build()}
+
+write zilla:reset.ext ${mqtt:resetEx()
+ .typeId(zilla:id("mqtt"))
+ .reasonCode(131)
+ .reason("Invalid session expiry interval")
+ .build()}
+rejected
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/client.rpt
new file mode 100644
index 0000000000..6383f9fd5d
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/client.rpt
@@ -0,0 +1,34 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/mqtt0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .expiry(1)
+ .clientId("client-1")
+ .build()
+ .build()}
+
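+# Reason code 135 (0x87) maps to the MQTT "not authorized" reason code.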
+read zilla:reset.ext ${mqtt:resetEx()
+ .typeId(zilla:id("mqtt"))
+ .reasonCode(135)
+ .build()}
+connect aborted
+
+
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/server.rpt
new file mode 100644
index 0000000000..866d39a084
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/session.group.reset.not.authorized/server.rpt
@@ -0,0 +1,34 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/mqtt0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .expiry(1)
+ .clientId("client-1")
+ .build()
+ .build()}
+
+write zilla:reset.ext ${mqtt:resetEx()
+ .typeId(zilla:id("mqtt"))
+ .reasonCode(135)
+ .build()}
+rejected
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.expire.message/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.expire.message/client.rpt
new file mode 100644
index 0000000000..e4a549d274
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.expire.message/client.rpt
@@ -0,0 +1,33 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+connect "zilla://streams/mqtt0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+ .typeId(zilla:id("mqtt"))
+ .subscribe()
+ .clientId("client")
+ .qos("AT_MOST_ONCE")
+ .filter("sensor/one", 1, "AT_LEAST_ONCE")
+ .build()
+ .build()}
+
+connected
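+# No data frames are expected before the close: this scenario verifies
+# that a message whose expiry interval has elapsed is not delivered.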
+
+
+write close
+read closed
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.expire.message/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.expire.message/server.rpt
new file mode 100644
index 0000000000..6bdcfae9e6
--- /dev/null
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.expire.message/server.rpt
@@ -0,0 +1,34 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/mqtt0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+ .typeId(zilla:id("mqtt"))
+ .subscribe()
+ .clientId("client")
+ .qos("AT_MOST_ONCE")
+ .filter("sensor/one", 1, "AT_LEAST_ONCE")
+ .build()
+ .build()}
+
+connected
+
+read closed
+write close
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt
index 0cf15cfa82..ea79a52fda 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/client.rpt
@@ -32,7 +32,6 @@ read zilla:data.ext ${mqtt:matchDataEx()
.subscribe()
.topic("sensor/one")
.subscriptionId(1)
- .expiryInterval(15)
.contentType("message")
.format("TEXT")
.responseTopic("sensor/one")
diff --git a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt
index d173fa405a..71c80a12ea 100644
--- a/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt
+++ b/specs/binding-mqtt-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/mqtt/subscribe.one.message.receive.response.topic.and.correlation.data/server.rpt
@@ -34,7 +34,6 @@ write zilla:data.ext ${mqtt:dataEx()
.subscribe()
.topic("sensor/one")
.subscriptionId(1)
- .expiryInterval(15)
.contentType("message")
.format("TEXT")
.responseTopic("sensor/one")
diff --git a/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java
index b777b39bac..ecfb8a7917 100644
--- a/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java
+++ b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/KafkaIT.java
@@ -341,6 +341,15 @@ public void shouldReceiveOneMessage() throws Exception
k3po.finish();
}
+ @Test
+ @Specification({
+ "${kafka}/subscribe.one.message.fragmented/client",
+ "${kafka}/subscribe.one.message.fragmented/server"})
+ public void shouldReceiveOneMessageFragmented() throws Exception
+ {
+ k3po.finish();
+ }
+
@Test
@Specification({
"${kafka}/subscribe.one.message.changed.topic.name/client",
@@ -386,6 +395,15 @@ public void shouldReceiveRetained() throws Exception
k3po.finish();
}
+ @Test
+ @Specification({
+ "${kafka}/subscribe.retain.fragmented/client",
+ "${kafka}/subscribe.retain.fragmented/server"})
+ public void shouldReceiveRetainedFragmented() throws Exception
+ {
+ k3po.finish();
+ }
+
@Test
@Specification({
"${kafka}/subscribe.receive.message.wildcard/client",
@@ -917,4 +935,22 @@ public void shouldReceiveMessageOverlappingWildcardMixedQos() throws Exception
{
k3po.finish();
}
+
+ @Test
+ @Specification({
+ "${kafka}/subscribe.expire.message/client",
+ "${kafka}/subscribe.expire.message/server"})
+ public void shouldExpireMessage() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Specification({
+ "${kafka}/subscribe.expire.message.fragmented/client",
+ "${kafka}/subscribe.expire.message.fragmented/server"})
+ public void shouldExpireMessageFragmented() throws Exception
+ {
+ k3po.finish();
+ }
}
diff --git a/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java
index 837ef28fbe..995d5edc53 100644
--- a/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java
+++ b/specs/binding-mqtt-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/kafka/streams/MqttIT.java
@@ -757,4 +757,13 @@ public void shouldReceiveMessageOverlappingWildcardMixedQos() throws Exception
{
k3po.finish();
}
+
+ @Test
+ @Specification({
+ "${mqtt}/subscribe.expire.message/client",
+ "${mqtt}/subscribe.expire.message/server"})
+ public void shouldExpireMessage() throws Exception
+ {
+ k3po.finish();
+ }
}
diff --git a/specs/binding-mqtt.spec/pom.xml b/specs/binding-mqtt.spec/pom.xml
index 9a3569818b..a89a1cf473 100644
--- a/specs/binding-mqtt.spec/pom.xml
+++ b/specs/binding-mqtt.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java b/specs/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java
index f48adabfbc..61e1124078 100644
--- a/specs/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java
+++ b/specs/binding-mqtt.spec/src/main/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctions.java
@@ -773,6 +773,13 @@ public MqttResetExBuilder reasonCode(
return this;
}
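+ // Sets the optional human-readable reason string carried by MqttResetExFW
+ // (mqtt.idl: string16 reason = null), complementing the numeric reasonCode.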
+ public MqttResetExBuilder reason(
+ String reason)
+ {
+ resetExRW.reason(reason);
+ return this;
+ }
+
public byte[] build()
{
final MqttResetExFW resetEx = resetExRW.build();
diff --git a/specs/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl b/specs/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl
index aa3fa5e988..9c44728e19 100644
--- a/specs/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl
+++ b/specs/binding-mqtt.spec/src/main/resources/META-INF/zilla/mqtt.idl
@@ -227,6 +227,7 @@ scope mqtt
{
string16 serverRef = null;
uint8 reasonCode = 0;
+ string16 reason = null;
}
union MqttFlushEx switch (uint8) extends core::stream::Extension
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/client.rpt
new file mode 100644
index 0000000000..6b7b0ceec6
--- /dev/null
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/client.rpt
@@ -0,0 +1,51 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+connect "zilla://streams/app0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .clientId("client")
+ .build()
+ .build()}
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .flags("CLEAN_START")
+ .qosMax(2)
+ .packetSizeMax(66560)
+ .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
+ .clientId("client")
+ .build()
+ .build()}
+
+connected
+
+read zilla:data.empty
+
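+# The empty session-state payload arrives first, so the reset below is
+# observed only after the CONNACK has been produced ("after.connack").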
+read zilla:reset.ext ${mqtt:resetEx()
+ .typeId(zilla:id("mqtt"))
+ .reasonCode(131)
+ .reason("Invalid session expiry interval")
+ .build()}
+
+write aborted
+read abort
+
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/server.rpt
new file mode 100644
index 0000000000..ec97e6429e
--- /dev/null
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.after.connack/server.rpt
@@ -0,0 +1,52 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+accept "zilla://streams/app0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .clientId("client")
+ .build()
+ .build()}
+
+write zilla:begin.ext ${mqtt:beginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .flags("CLEAN_START")
+ .qosMax(2)
+ .packetSizeMax(66560)
+ .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
+ .clientId("client")
+ .build()
+ .build()}
+
+connected
+
+write zilla:data.empty
+
+write zilla:reset.ext ${mqtt:resetEx()
+ .typeId(zilla:id("mqtt"))
+ .reasonCode(131)
+ .reason("Invalid session expiry interval")
+ .build()}
+
+read abort
+write aborted
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/client.rpt
new file mode 100644
index 0000000000..e4e3a14673
--- /dev/null
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/client.rpt
@@ -0,0 +1,49 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+connect "zilla://streams/app0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${mqtt:beginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .clientId("client")
+ .build()
+ .build()}
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .flags("CLEAN_START")
+ .qosMax(2)
+ .packetSizeMax(66560)
+ .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
+ .clientId("client")
+ .build()
+ .build()}
+
+connected
+
+read zilla:reset.ext ${mqtt:resetEx()
+ .typeId(zilla:id("mqtt"))
+ .reasonCode(131)
+ .reason("Invalid session expiry interval")
+ .build()}
+
+write aborted
+read abort
+
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/server.rpt
new file mode 100644
index 0000000000..20e60a236f
--- /dev/null
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/application/session.invalid.session.timeout.before.connack/server.rpt
@@ -0,0 +1,50 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+accept "zilla://streams/app0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${mqtt:matchBeginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .clientId("client")
+ .build()
+ .build()}
+
+write zilla:begin.ext ${mqtt:beginEx()
+ .typeId(zilla:id("mqtt"))
+ .session()
+ .flags("CLEAN_START")
+ .qosMax(2)
+ .packetSizeMax(66560)
+ .capabilities("RETAIN", "WILDCARD", "SUBSCRIPTION_IDS", "SHARED_SUBSCRIPTIONS")
+ .clientId("client")
+ .build()
+ .build()}
+
+connected
+
+write zilla:reset.ext ${mqtt:resetEx()
+ .typeId(zilla:id("mqtt"))
+ .reasonCode(131)
+ .reason("Invalid session expiry interval")
+ .build()}
+
+read abort
+write aborted
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/session.invalid.session.timeout.after.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/session.invalid.session.timeout.after.connack/client.rpt
new file mode 100644
index 0000000000..0dbe81475a
--- /dev/null
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/session.invalid.session.timeout.after.connack/client.rpt
@@ -0,0 +1,43 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+connect "zilla://streams/net0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+connected
+
+write [0x10 0x13] # CONNECT
+ [0x00 0x04] "MQTT" # protocol name
+ [0x05] # protocol version
+ [0x02] # flags = clean start
+ [0x00 0x3c] # keep alive = 60s
+ [0x00] # properties = none
+ [0x00 0x06] "client" # client id
+
+read [0x20 0x08] # CONNACK
+ [0x00] # flags = none
+ [0x00] # reason code
+ [0x05] # properties
+ [0x27] 66560 # maximum packet size = 66560
+
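+# DISCONNECT remaining length 0x24 = 36: reason code (1) + property
+# length byte (0x22 = 34) + reason string property (id 0x1f, 2-byte
+# length, 31 characters).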
+read [0xe0 0x24] # DISCONNECT
+ [0x83] # reason = implementation specific error
+ [0x22] # properties
+ [0x1f 0x00 0x1f] "Invalid session expiry interval" # reason string
+
+read closed
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/session.invalid.session.timeout.after.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/session.invalid.session.timeout.after.connack/server.rpt
new file mode 100644
index 0000000000..0d2cdc0ba0
--- /dev/null
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/session.invalid.session.timeout.after.connack/server.rpt
@@ -0,0 +1,44 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+accept "zilla://streams/net0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+accepted
+connected
+
+read [0x10 0x13] # CONNECT
+ [0x00 0x04] "MQTT" # protocol name
+ [0x05] # protocol version
+ [0x02] # flags = clean start
+ [0x00 0x3c] # keep alive = 60s
+ [0x00] # properties = none
+ [0x00 0x06] "client" # client id
+
+write [0x20 0x08] # CONNACK
+ [0x00] # flags = none
+ [0x00] # reason code
+ [0x05] # properties
+ [0x27] 66560 # maximum packet size = 66560
+
+write [0xe0 0x24] # DISCONNECT
+ [0x83] # reason = implementation specific error
+ [0x22] # properties
+ [0x1f 0x00 0x1f] "Invalid session expiry interval" # reason string
+
+write close
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/session.invalid.session.timeout.before.connack/client.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/session.invalid.session.timeout.before.connack/client.rpt
new file mode 100644
index 0000000000..22d62d8bbb
--- /dev/null
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/session.invalid.session.timeout.before.connack/client.rpt
@@ -0,0 +1,38 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+connect "zilla://streams/net0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+connected
+
+write [0x10 0x13] # CONNECT
+ [0x00 0x04] "MQTT" # protocol name
+ [0x05] # protocol version
+ [0x02] # flags = clean start
+ [0x00 0x3c] # keep alive = 60s
+ [0x00] # properties = none
+ [0x00 0x06] "client" # client id
+
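+# Because the reset arrives before CONNACK, the error is reported in the
+# CONNACK itself: remaining length 0x25 = 37 adds the flags byte to the
+# same reason code and reason string property used in the DISCONNECT case.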
+read [0x20 0x25] # CONNACK
+ [0x00] # flags = none
+ [0x83] # reason code = implementation specific error
+ [0x22] # properties
+ [0x1f 0x00 0x1f] "Invalid session expiry interval" # reason string
+
+read closed
diff --git a/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/session.invalid.session.timeout.before.connack/server.rpt b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/session.invalid.session.timeout.before.connack/server.rpt
new file mode 100644
index 0000000000..4b86ea54af
--- /dev/null
+++ b/specs/binding-mqtt.spec/src/main/scripts/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/session.invalid.session.timeout.before.connack/server.rpt
@@ -0,0 +1,39 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+accept "zilla://streams/net0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+accepted
+connected
+
+read [0x10 0x13] # CONNECT
+ [0x00 0x04] "MQTT" # protocol name
+ [0x05] # protocol version
+ [0x02] # flags = clean start
+ [0x00 0x3c] # keep alive = 60s
+ [0x00] # properties = none
+ [0x00 0x06] "client" # client id
+
+write [0x20 0x25] # CONNACK
+ [0x00] # flags = none
+ [0x83] # reason code = implementation specific error
+ [0x22] # properties
+ [0x1f 0x00 0x1f] "Invalid session expiry interval" # reason string
+
+write close
diff --git a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java
index 5f93f65fe5..b40abe4eae 100644
--- a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java
+++ b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/internal/MqttFunctionsTest.java
@@ -1218,12 +1218,14 @@ public void shouldEncodeMqttResetEx()
.typeId(0)
.serverRef("mqtt-1.example.com:1883")
.reasonCode(0)
+ .reason("test")
.build();
DirectBuffer buffer = new UnsafeBuffer(array);
MqttResetExFW mqttResetEx = new MqttResetExFW().wrap(buffer, 0, buffer.capacity());
assertEquals(0, mqttResetEx.typeId());
assertEquals("mqtt-1.example.com:1883", mqttResetEx.serverRef().asString());
+ assertEquals("test", mqttResetEx.reason().asString());
assertEquals(0, mqttResetEx.reasonCode());
}
diff --git a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java
index 3e136d862f..c1f139f2d6 100644
--- a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java
+++ b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/application/SessionIT.java
@@ -208,4 +208,22 @@ public void shouldSubscribeAndPublishToNonDefaultRoute() throws Exception
{
k3po.finish();
}
+
+ @Test
+ @Specification({
+ "${app}/session.invalid.session.timeout.after.connack/client",
+ "${app}/session.invalid.session.timeout.after.connack/server"})
+ public void shouldPropagateMqttReasonCodeAndStringAfterConnack() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Specification({
+ "${app}/session.invalid.session.timeout.before.connack/client",
+ "${app}/session.invalid.session.timeout.before.connack/server"})
+ public void shouldPropagateMqttReasonCodeAndStringBeforeConnack() throws Exception
+ {
+ k3po.finish();
+ }
}
diff --git a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/SessionIT.java b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/SessionIT.java
index 24b48d162e..a0da7b4841 100644
--- a/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/SessionIT.java
+++ b/specs/binding-mqtt.spec/src/test/java/io/aklivity/zilla/specs/binding/mqtt/streams/network/v5/SessionIT.java
@@ -191,4 +191,22 @@ public void shouldSubscribeAndPublishToNonDefaultRoute() throws Exception
{
k3po.finish();
}
+
+ @Test
+ @Specification({
+ "${net}/session.invalid.session.timeout.after.connack/client",
+ "${net}/session.invalid.session.timeout.after.connack/server"})
+ public void shouldPropagateMqttReasonCodeAndStringAfterConnack() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Specification({
+ "${net}/session.invalid.session.timeout.before.connack/client",
+ "${net}/session.invalid.session.timeout.before.connack/server"})
+ public void shouldPropagateMqttReasonCodeAndStringBeforeConnack() throws Exception
+ {
+ k3po.finish();
+ }
}
diff --git a/specs/binding-proxy.spec/pom.xml b/specs/binding-proxy.spec/pom.xml
index d33f8a60f7..5c47109d1d 100644
--- a/specs/binding-proxy.spec/pom.xml
+++ b/specs/binding-proxy.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-sse-kafka.spec/pom.xml b/specs/binding-sse-kafka.spec/pom.xml
index c69f61f4ad..cd0906f1ec 100644
--- a/specs/binding-sse-kafka.spec/pom.xml
+++ b/specs/binding-sse-kafka.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-sse.spec/pom.xml b/specs/binding-sse.spec/pom.xml
index d57933af6e..0dd1d7aac6 100644
--- a/specs/binding-sse.spec/pom.xml
+++ b/specs/binding-sse.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-tcp.spec/pom.xml b/specs/binding-tcp.spec/pom.xml
index d1d363fde8..4ad08c1d3e 100644
--- a/specs/binding-tcp.spec/pom.xml
+++ b/specs/binding-tcp.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-tls.spec/pom.xml b/specs/binding-tls.spec/pom.xml
index 55f5ef527c..3872da0bef 100644
--- a/specs/binding-tls.spec/pom.xml
+++ b/specs/binding-tls.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-ws.spec/pom.xml b/specs/binding-ws.spec/pom.xml
index 79856548a4..da5c8aaceb 100644
--- a/specs/binding-ws.spec/pom.xml
+++ b/specs/binding-ws.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/engine.spec/pom.xml b/specs/engine.spec/pom.xml
index a1bedcd2f5..372ef1ffb7 100644
--- a/specs/engine.spec/pom.xml
+++ b/specs/engine.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/exporter-prometheus.spec/pom.xml b/specs/exporter-prometheus.spec/pom.xml
index 28d2a33044..ec59e5de79 100644
--- a/specs/exporter-prometheus.spec/pom.xml
+++ b/specs/exporter-prometheus.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/guard-jwt.spec/pom.xml b/specs/guard-jwt.spec/pom.xml
index 365b0c0209..ee30a47cc7 100644
--- a/specs/guard-jwt.spec/pom.xml
+++ b/specs/guard-jwt.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/metrics-grpc.spec/pom.xml b/specs/metrics-grpc.spec/pom.xml
index 87767dfaf4..8bfaa3e5ba 100644
--- a/specs/metrics-grpc.spec/pom.xml
+++ b/specs/metrics-grpc.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/metrics-http.spec/pom.xml b/specs/metrics-http.spec/pom.xml
index ac75445c24..3d0a167701 100644
--- a/specs/metrics-http.spec/pom.xml
+++ b/specs/metrics-http.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/metrics-stream.spec/pom.xml b/specs/metrics-stream.spec/pom.xml
index 30cc827c02..cef234f692 100644
--- a/specs/metrics-stream.spec/pom.xml
+++ b/specs/metrics-stream.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/pom.xml b/specs/pom.xml
index d4001dbb8f..06c65358f1 100644
--- a/specs/pom.xml
+++ b/specs/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>zilla</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/vault-filesystem.spec/pom.xml b/specs/vault-filesystem.spec/pom.xml
index 84931a25fe..b2a2a82f59 100644
--- a/specs/vault-filesystem.spec/pom.xml
+++ b/specs/vault-filesystem.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.62</version>
+ <version>0.9.63</version>
<relativePath>../pom.xml</relativePath>