Commit 5301800

chore: fix logging and tests
- Replace `.Info` calls with `.Warn`.
- Log closing/write errors in tests.
- TestLoadBalancer now starts with a negative score, which ensures that backend tiers are also tested.

Signed-off-by: Dmitriy Matrenichev <[email protected]>
Parent: b23a173
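Context for the tier note in the message: in the upstream package, each backend carries a health score, and the balancer picks from the highest-scoring bucket ("tier") first; starting at -1 (see WithInitialScore in the test diff below) means a backend is only promoted after its first successful health check. The following is a rough, self-contained model of that selection idea, not the library's implementation; every name in it is made up for illustration.

package main

import (
	"fmt"
	"sort"
)

// backend pairs an address with a health score; a negative score marks a
// backend that has not passed a health check yet.
type backend struct {
	addr  string
	score float64
}

// pick chooses only from the highest-scoring tier, so unproven backends are
// skipped while a healthier tier exists.
func pick(backends []backend) (string, error) {
	if len(backends) == 0 {
		return "", fmt.Errorf("no upstreams available")
	}

	sorted := append([]backend(nil), backends...)
	sort.SliceStable(sorted, func(i, j int) bool { return sorted[i].score > sorted[j].score })

	// The leading run of equal scores forms the preferred tier; a real
	// balancer would rotate within it instead of always taking the first.
	return sorted[0].addr, nil
}

func main() {
	addr, _ := pick([]backend{
		{addr: "10.0.0.1:6443", score: -1}, // fresh, unchecked
		{addr: "10.0.0.2:6443", score: 0},  // passed at least one check
	})

	fmt.Println("picked:", addr) // picked: 10.0.0.2:6443
}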

4 files changed, +52 -20 lines


controlplane/controlplane.go

Lines changed: 1 addition & 1 deletion
@@ -118,7 +118,7 @@ func (lb *LoadBalancer) Start(upstreamCh <-chan []string) error {
 		select {
 		case upstreams := <-upstreamCh:
 			if err := lb.lb.ReconcileRoute(lb.endpoint, upstreams); err != nil {
-				lb.lb.Logger.Info("failed reconciling list of upstreams",
+				lb.lb.Logger.Warn("failed reconciling list of upstreams",
 					zap.Strings("upstreams", upstreams),
 					zap.Error(err),
 				)
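
For orientation, the reconcile loop above is driven by a channel of upstream address lists. Below is a minimal usage sketch assembled only from the signatures visible in this commit (NewLoadBalancer, Start, Shutdown); the endpoint addresses are placeholders.

import (
	"go.uber.org/zap"

	"github.com/siderolabs/go-loadbalancer/controlplane"
)

func runExample() error {
	lb, err := controlplane.NewLoadBalancer("localhost", 0, zap.NewExample())
	if err != nil {
		return err
	}

	upstreamCh := make(chan []string)
	if err := lb.Start(upstreamCh); err != nil {
		return err
	}

	// Each send reconciles the route to a new upstream set; per this commit,
	// reconcile failures are logged at Warn rather than Info.
	upstreamCh <- []string{"10.0.0.1:6443", "10.0.0.2:6443"}

	return lb.Shutdown()
}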

controlplane/controlplane_test.go

Lines changed: 33 additions & 14 deletions
@@ -12,20 +12,24 @@ import (
 	"testing"
 	"time"

+	"github.com/siderolabs/gen/slices"
 	"github.com/siderolabs/go-retry/retry"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
 	"go.uber.org/zap/zaptest"

 	"github.com/siderolabs/go-loadbalancer/controlplane"
+	"github.com/siderolabs/go-loadbalancer/upstream"
 )

+//nolint:govet
 type mockUpstream struct {
+	T        testing.TB
+	Identity string
+
 	addr string
 	l    net.Listener
-
-	identity string
 }

 func (u *mockUpstream) Start() error {

@@ -50,13 +54,14 @@ func (u *mockUpstream) serve() {
 			return
 		}

-		c.Write([]byte(u.identity)) //nolint: errcheck
-		c.Close()                   //nolint: errcheck
+		_, err = c.Write([]byte(u.Identity))
+		require.NoError(u.T, err)
+		require.NoError(u.T, c.Close())
 	}
 }

 func (u *mockUpstream) Close() {
-	u.l.Close() //nolint: errcheck
+	require.NoError(u.T, u.l.Close())
 }

 func TestLoadBalancer(t *testing.T) {

@@ -69,16 +74,24 @@ func TestLoadBalancer(t *testing.T) {

 	upstreams := make([]mockUpstream, upstreamCount)
 	for i := range upstreams {
-		upstreams[i].identity = strconv.Itoa(i)
+		upstreams[i].T = t
+		upstreams[i].Identity = strconv.Itoa(i)
 		require.NoError(t, upstreams[i].Start())
 	}

-	upstreamAddrs := make([]string, len(upstreams))
-	for i := range upstreamAddrs {
-		upstreamAddrs[i] = upstreams[i].addr
-	}
-
-	lb, err := controlplane.NewLoadBalancer("localhost", 0, zaptest.NewLogger(t))
+	upstreamAddrs := slices.Map(upstreams, func(u mockUpstream) string { return u.addr })
+
+	lb, err := controlplane.NewLoadBalancer(
+		"localhost",
+		0,
+		zaptest.NewLogger(t),
+		controlplane.WithHealthCheckOptions(
+			// Start with a negative initial score so that every healthcheck is
+			// performed at least once; this also exercises upstream tiers.
+			upstream.WithInitialScore(-1),
+			upstream.WithHealthcheckInterval(10*time.Millisecond),
+		),
+	)
 	require.NoError(t, err)

 	upstreamCh := make(chan []string)

@@ -93,17 +106,19 @@ func TestLoadBalancer(t *testing.T) {
 			return 0, retry.ExpectedError(err)
 		}

-		defer c.Close() //nolint:errcheck
+		defer ensure(t, c.Close)

 		id, err := io.ReadAll(c)
 		if err != nil {
 			return 0, retry.ExpectedError(err)
+		} else if len(id) == 0 {
+			return 0, retry.ExpectedErrorf("zero length response")
 		}

 		return strconv.Atoi(string(id))
 	}

-	assert.NoError(t, retry.Constant(10*time.Second, retry.WithUnits(time.Second)).Retry(func() error {
+	assert.NoError(t, retry.Constant(10*time.Second, retry.WithUnits(30*time.Millisecond)).Retry(func() error {
 		identity, err := readIdentity()
 		if err != nil {
 			return err

@@ -170,3 +185,7 @@ func TestLoadBalancer(t *testing.T) {

 	assert.NoError(t, lb.Shutdown())
 }
+
+func ensure(t *testing.T, closer func() error) {
+	require.NoError(t, closer())
+}
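
Aside: the slices.Map call introduced above is a drop-in replacement for the loop this commit removed; the two forms are equivalent.

// Before: manual preallocation and index assignment.
upstreamAddrs := make([]string, len(upstreams))
for i := range upstreamAddrs {
	upstreamAddrs[i] = upstreams[i].addr
}

// After: the same projection via github.com/siderolabs/gen/slices.
upstreamAddrs = slices.Map(upstreams, func(u mockUpstream) string { return u.addr })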

loadbalancer/node.go

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ func (upstream node) healthCheck(ctx context.Context) error {

 	c, err := d.DialContext(ctx, "tcp", upstream.address)
 	if err != nil {
-		upstream.logger.Info("healthcheck failed", zap.String("address", upstream.address), zap.Error(err))
+		upstream.logger.Warn("healthcheck failed", zap.String("address", upstream.address), zap.Error(err))

 		return err
 	}

loadbalancer/target.go

Lines changed: 17 additions & 4 deletions
@@ -25,11 +25,18 @@ type lbTarget struct {
 func (target *lbTarget) HandleConn(conn net.Conn) {
 	upstreamBackend, err := target.list.Pick()
 	if err != nil {
-		target.logger.Info(
+		target.logger.Warn(
 			"no upstreams available, closing connection",
 			zap.String("remote_addr", conn.RemoteAddr().String()),
 		)
-		conn.Close() //nolint: errcheck
+
+		if closeErr := conn.Close(); closeErr != nil {
+			target.logger.Warn(
+				"error closing connection",
+				zap.String("remote_addr", conn.RemoteAddr().String()),
+				zap.Error(closeErr),
+			)
+		}

 		return
 	}

@@ -45,9 +52,15 @@ func (target *lbTarget) HandleConn(conn net.Conn) {
 	upstreamTarget.KeepAlivePeriod = target.keepAlivePeriod
 	upstreamTarget.TCPUserTimeout = target.tcpUserTimeout
 	upstreamTarget.OnDialError = func(src net.Conn, dstDialErr error) {
-		src.Close() //nolint: errcheck
+		if err := src.Close(); err != nil {
+			target.logger.Warn(
+				"error closing connection",
+				zap.String("remote_addr", src.RemoteAddr().String()),
+				zap.Error(err),
+			)
+		}

-		target.logger.Info(
+		target.logger.Warn(
 			"error dialing upstream",
 			zap.String("upstream_addr", upstreamBackend.address),
 			zap.Error(dstDialErr),
