
Commit 8697a50

Merge branch 'master' into sbruens/caddy-ws
2 parents c1a1b01 + 3c24817

4 files changed: +113 -5 lines changed


.goreleaser.yml

Lines changed: 8 additions & 0 deletions
@@ -63,3 +63,11 @@ changelog:
   exclude:
     - '^docs:'
     - '^test:'
+
+git:
+  # Sort tags by creation time when commit has more than one tag.
+  tag_sort: -version:creatordate
+
+  # Specify prerelease suffix while sorting tags if there are more than one tag
+  # in the same commit.
+  prerelease_suffix: "-rc"

caddy/app.go

Lines changed: 11 additions & 4 deletions
@@ -19,6 +19,7 @@ package outlinecaddy

 import (
 	"errors"
+	"fmt"
 	"log/slog"

 	outline_prometheus "github.com/Jigsaw-Code/outline-ss-server/prometheus"
@@ -30,9 +31,14 @@ import (
 const outlineModuleName = "outline"

 func init() {
+	replayCache := outline.NewReplayCache(0)
 	caddy.RegisterModule(ModuleRegistration{
-		ID:  outlineModuleName,
-		New: func() caddy.Module { return new(OutlineApp) },
+		ID: outlineModuleName,
+		New: func() caddy.Module {
+			app := new(OutlineApp)
+			app.ReplayCache = replayCache
+			return app
+		},
 	})
 }

@@ -65,8 +71,9 @@ func (app *OutlineApp) Provision(ctx caddy.Context) error {
 	app.logger.Info("provisioning app instance")

 	if app.ShadowsocksConfig != nil {
-		// TODO: Persist replay cache across config reloads.
-		app.ReplayCache = outline.NewReplayCache(app.ShadowsocksConfig.ReplayHistory)
+		if err := app.ReplayCache.Resize(app.ShadowsocksConfig.ReplayHistory); err != nil {
+			return fmt.Errorf("failed to configure replay history with capacity %d: %v", app.ShadowsocksConfig.ReplayHistory, err)
+		}
 	}

 	if err := app.defineMetrics(); err != nil {
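
The net effect of this change is that the replay cache now outlives any single Caddy config: init() builds one cache for the whole process, every OutlineApp constructed by the module registration shares it, and Provision only resizes it, which resolves the removed TODO about persisting the cache across config reloads. A minimal, self-contained sketch of the pattern (simplified, hypothetical names; not the actual Outline module code):

package main

import "fmt"

// cache stands in for outline.ReplayCache: state that must survive config reloads.
type cache struct{ capacity int }

// app stands in for OutlineApp; Caddy constructs a fresh instance on every reload.
type app struct{ replayCache *cache }

// shared is created once per process, like the cache built in init() above.
var shared = &cache{}

// newApp mirrors the New func passed to caddy.RegisterModule: each reload gets a
// new app value, but all of them point at the same shared cache.
func newApp() *app { return &app{replayCache: shared} }

func main() {
	a := newApp()                  // initial config load
	a.replayCache.capacity = 10000 // roughly what Provision's Resize call does

	b := newApp()                               // config reload: new app, same cache
	fmt.Println(a.replayCache == b.replayCache) // true: replay history is preserved
}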

service/replay.go

Lines changed: 16 additions & 1 deletion
@@ -16,6 +16,7 @@ package service

 import (
 	"encoding/binary"
+	"errors"
 	"sync"
 )

@@ -92,11 +93,25 @@ func (c *ReplayCache) Add(id string, salt []byte) bool {
 		return false
 	}
 	_, inArchive := c.archive[hash]
-	if len(c.active) == c.capacity {
+	if len(c.active) >= c.capacity {
 		// Discard the archive and move active to archive.
 		c.archive = c.active
 		c.active = make(map[uint32]empty, c.capacity)
 	}
 	c.active[hash] = empty{}
 	return !inArchive
 }
+
+// Resize adjusts the capacity of the ReplayCache.
+func (c *ReplayCache) Resize(capacity int) error {
+	if capacity > MaxCapacity {
+		return errors.New("ReplayCache capacity would result in too many false positives")
+	}
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	c.capacity = capacity
+	// NOTE: The active handshakes and archive lists are not explicitly shrunk.
+	// Their sizes will naturally adjust as new handshakes are added and the cache
+	// adheres to the updated capacity.
+	return nil
+}
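
Resize only updates the capacity field (after rejecting anything above MaxCapacity); the maps are never shrunk in place. That works because Add rotates active into archive whenever len(c.active) >= c.capacity, so after a downsize the very next rotation produces maps bounded by the new capacity. This is also why the comparison above was relaxed from == to >=: a shrunken capacity can now be smaller than the current active size. A rough, self-contained sketch of that two-generation scheme (simplified string keys, no hashing or locking; not the actual service package code):

package main

import "fmt"

// replayCache is a simplified two-generation set: recent salts live in active,
// the previous generation in archive, and a rotation discards anything older.
type replayCache struct {
	capacity        int
	active, archive map[string]struct{}
}

// add reports whether the salt is new, mirroring the shape of ReplayCache.Add.
func (c *replayCache) add(salt string) bool {
	if _, ok := c.active[salt]; ok {
		return false
	}
	_, inArchive := c.archive[salt]
	// ">=" rather than "==" so that a smaller capacity takes effect on the next
	// add instead of never triggering a rotation again.
	if len(c.active) >= c.capacity {
		c.archive = c.active
		c.active = make(map[string]struct{}, c.capacity)
	}
	c.active[salt] = struct{}{}
	return !inArchive
}

func main() {
	c := &replayCache{capacity: 3, active: map[string]struct{}{}, archive: map[string]struct{}{}}
	for _, s := range []string{"a", "b", "c", "d"} {
		c.add(s) // adding "d" rotates: archive={a,b,c}, active={d}
	}
	c.capacity = 2 // what Resize does, minus the MaxCapacity check and locking
	c.add("e")     // active={d,e}
	c.add("f")     // len(active) >= 2, so rotate: archive={d,e}, active={f}
	fmt.Println(len(c.active), len(c.archive)) // 1 2: both maps now respect the new capacity
}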

service/replay_test.go

Lines changed: 78 additions & 0 deletions
@@ -17,6 +17,9 @@ package service
 import (
 	"encoding/binary"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 const keyID = "the key"
@@ -91,6 +94,81 @@ func TestReplayCache_Archive(t *testing.T) {
 	}
 }

+func TestReplayCache_Resize(t *testing.T) {
+	t.Run("Smaller resizes active and archive maps", func(t *testing.T) {
+		salts := makeSalts(10)
+		cache := NewReplayCache(5)
+		for _, s := range salts {
+			cache.Add(keyID, s)
+		}
+
+		err := cache.Resize(3)
+
+		require.NoError(t, err)
+		assert.Equal(t, cache.capacity, 3, "Expected capacity to be updated")
+
+		// Adding a new salt should trigger a shrinking of the active map as it hits the new
+		// capacity immediately.
+		cache.Add(keyID, salts[0])
+		assert.Len(t, cache.active, 1, "Expected active handshakes length to have shrunk")
+		assert.Len(t, cache.archive, 5, "Expected archive handshakes length to not have shrunk")
+
+		// Adding more new salts should eventually trigger a shrinking of the archive map as well,
+		// when the shrunken active map gets moved to the archive.
+		for _, s := range salts {
+			cache.Add(keyID, s)
+		}
+		assert.Len(t, cache.archive, 3, "Expected archive handshakes length to have shrunk")
+	})
+
+	t.Run("Larger resizes active and archive maps", func(t *testing.T) {
+		salts := makeSalts(10)
+		cache := NewReplayCache(5)
+		for _, s := range salts {
+			cache.Add(keyID, s)
+		}
+
+		err := cache.Resize(10)
+
+		require.NoError(t, err)
+		assert.Equal(t, cache.capacity, 10, "Expected capacity to be updated")
+		assert.Len(t, cache.active, 5, "Expected active handshakes length not to have changed")
+		assert.Len(t, cache.archive, 5, "Expected archive handshakes length not to have changed")
+	})
+
+	t.Run("Still detect salts", func(t *testing.T) {
+		salts := makeSalts(10)
+		cache := NewReplayCache(5)
+		for _, s := range salts {
+			cache.Add(keyID, s)
+		}
+
+		cache.Resize(10)
+
+		for _, s := range salts {
+			if cache.Add(keyID, s) {
+				t.Error("Should still be able to detect the salts after resizing")
+			}
+		}
+
+		cache.Resize(3)
+
+		for _, s := range salts {
+			if cache.Add(keyID, s) {
+				t.Error("Should still be able to detect the salts after resizing")
+			}
+		}
+	})
+
+	t.Run("Exceeding maximum capacity", func(t *testing.T) {
+		cache := &ReplayCache{}
+
+		err := cache.Resize(MaxCapacity + 1)
+
+		require.Error(t, err)
+	})
+}
+
 // Benchmark to determine the memory usage of ReplayCache.
 // Note that NewReplayCache only allocates the active set,
 // so the eventual memory usage will be roughly double.
