
Commit 75c5b10

chore(deps): update ghcr.io/open-feature/flagd-testbed docker tag to v0.5.13 (#1068)

Signed-off-by: Todd Baert <[email protected]>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Todd Baert <[email protected]>
1 parent 6a84d05 commit 75c5b10

2 files changed: +30 −20 lines changed


libs/providers/flagd/docker-compose.yaml (+1 −1)

@@ -1,6 +1,6 @@
 services:
   flagd:
-    image: ghcr.io/open-feature/flagd-testbed:v0.5.6
+    image: ghcr.io/open-feature/flagd-testbed:v0.5.13
     ports:
       - 8013:8013
   flagd-unstable:
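
For context only: the testbed container published on port 8013 above is what the provider's e2e suite connects to. The sketch below shows one way to point the Node flagd provider at it and count READY events; the package names and options follow the public flagd provider and OpenFeature server SDK documentation, and none of it is taken from this commit.

// Illustrative only: connect an OpenFeature client to the flagd testbed on localhost:8013.
import { OpenFeature, ProviderEvents } from '@openfeature/server-sdk';
import { FlagdProvider } from '@openfeature/flagd-provider';

let readyRunCount = 0;

function connectToTestbed() {
  // flagd's gRPC endpoint is the port published by the compose service above
  OpenFeature.setProvider(new FlagdProvider({ host: 'localhost', port: 8013 }));

  const client = OpenFeature.getClient();
  client.addHandler(ProviderEvents.Ready, () => {
    readyRunCount++; // the reconnect test below relies on the same counting pattern
  });
  return client;
}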

libs/providers/flagd/src/e2e/step-definitions/flagd-reconnect.unstable.ts (+29 −19)

@@ -22,26 +22,36 @@ export function flagdRecconnectUnstable() {
     });
   });
 
-  test('Provider reconnection', ({ given, when, then, and }) => {
-    given('a flagd provider is set', () => {
-      // handled in beforeAll
-    });
-    when('a PROVIDER_READY handler and a PROVIDER_ERROR handler are added', () => {
-      client.addHandler(ProviderEvents.Error, () => {
-        errorRunCount++;
+  describe('retry', () => {
+    /**
+     * This describe block and retry settings are calibrated to gRPC's retry time
+     * and our testing container's restart cadence.
+     */
+    const retryTimes = 240;
+    const retryDelayMs = 1000;
+    jest.retryTimes(retryTimes);
+
+    test('Provider reconnection', ({ given, when, then, and }) => {
+      given('a flagd provider is set', () => {
+        // handled in beforeAll
+      });
+      when('a PROVIDER_READY handler and a PROVIDER_ERROR handler are added', () => {
+        client.addHandler(ProviderEvents.Error, () => {
+          errorRunCount++;
+        });
+      });
+      then('the PROVIDER_READY handler must run when the provider connects', async () => {
+        // should already be at 1 from `beforeAll`
+        expect(readyRunCount).toEqual(1);
+      });
+      and("the PROVIDER_ERROR handler must run when the provider's connection is lost", async () => {
+        await new Promise((resolve) => setTimeout(resolve, retryDelayMs));
+        expect(errorRunCount).toBeGreaterThan(0);
+      });
+      and('when the connection is reestablished the PROVIDER_READY handler must run again', async () => {
+        await new Promise((resolve) => setTimeout(resolve, retryDelayMs));
+        expect(readyRunCount).toBeGreaterThan(1);
       });
-    });
-    then('the PROVIDER_READY handler must run when the provider connects', async () => {
-      // should already be at 1 from `beforeAll`
-      expect(readyRunCount).toEqual(1);
-    });
-    and("the PROVIDER_ERROR handler must run when the provider's connection is lost", async () => {
-      await new Promise((resolve) => setTimeout(resolve, 10000));
-      expect(errorRunCount).toBeGreaterThan(0);
-    });
-    and('when the connection is reestablished the PROVIDER_READY handler must run again', async () => {
-      await new Promise((resolve) => setTimeout(resolve, 10000));
-      expect(readyRunCount).toBeGreaterThan(1);
     });
   });
 
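The diff above swaps two fixed 10-second sleeps for a short per-attempt delay (retryDelayMs) and leans on jest.retryTimes to re-run the whole scenario until the unstable testbed container has actually dropped and re-established the connection. A minimal, self-contained sketch of that pattern follows; the errorCount counter and the simulated event timer are invented for illustration and are not part of this commit.

// Sketch of the retry pattern: fail fast per attempt, let Jest retry the spec.
// jest.retryTimes requires the default jest-circus runner.
const retryTimes = 240; // generous retry ceiling, mirroring the values in the diff
const retryDelayMs = 1000; // short wait per attempt instead of a single 10 s sleep

jest.retryTimes(retryTimes);

const delay = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

// Invented stand-in for "the provider's connection is lost": the counter only
// increments after a few seconds, like an ERROR event arriving from the testbed.
let errorCount = 0;
setTimeout(() => errorCount++, 3000);

describe('retry pattern (illustrative)', () => {
  test('eventually observes the error event', async () => {
    // Each attempt waits only retryDelayMs; if the event has not arrived yet,
    // the assertion fails and jest.retryTimes schedules another attempt.
    await delay(retryDelayMs);
    expect(errorCount).toBeGreaterThan(0);
  });
});

Compared with the old fixed 10-second waits, each attempt stays cheap while the overall budget (240 retries at roughly 1 second apiece) comfortably covers the testbed container's restart cadence described in the diff's comment.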
