1
/*
Package array provides a ring buffer, an in-memory building block for a cache.
*/
4
+
5
+ package array
6
+
7
+ import (
8
+ "errors"
9
+ "io"
10
+ )
11
+
12
// ErrOutOfRange is returned when a requested range falls outside the
// buffer's current virtual window or exceeds its capacity.
var ErrOutOfRange = errors.New("out of range")
13
+
14
+
15
// RingBuf is a ring buffer backed by a plain byte array. Monotonically
// increasing begin/end offsets describe a virtual storage space; writes that
// reach the end of the array wrap around and continue from its head.
type RingBuf struct {
	begin int64  // left boundary of the virtual space: offset of the oldest stored byte
	end   int64  // right boundary of the virtual space: one past the newest stored byte
	data  []byte // backing array
	index int    // next writable position in data; a common optimization is to keep len(data) a power of two so a mask replaces the modulo
}
22
+
23
+ func NewRingBuf (size int ) (rb RingBuf ) {
24
+ rb .data = make ([]byte , size )
25
+ rb .begin = 0
26
+ rb .end = 0
27
+ rb .index = 0
28
+ return
29
+ }
30
+
31
// Size returns the capacity of the backing array in bytes.
func (rb *RingBuf) Size() int64 {
	return int64(len(rb.data))
}
34
+
35
// Begin returns the left boundary of the virtual space: the virtual offset
// of the oldest byte still stored.
func (rb *RingBuf) Begin() int64 {
	return rb.begin
}
38
+
39
// End returns the right boundary of the virtual space: one past the most
// recently written byte.
func (rb *RingBuf) End() int64 {
	return rb.end
}
42
+
43
+ // 从尾部写入,写满数组后覆盖写头部,达到ring效果
44
+ func (rb * RingBuf ) Write (p []byte ) (n int , err error ) {
45
+ if len (p ) > len (rb .data ) {
46
+ err = ErrOutOfRange
47
+ return
48
+ }
49
+ for n < len (p ) {
50
+ written := copy (rb .data [rb .index :], p [n :])
51
+ rb .end += int64 (written )
52
+ n += written
53
+ rb .index += written
54
+ if rb .index >= len (rb .data ) { // 回到头部
55
+ rb .index -= len (rb .data )
56
+ }
57
+ }
58
+ if int (rb .end - rb .begin ) > len (rb .data ) {
59
+ rb .begin = rb .end - int64 (len (rb .data ))
60
+ }
61
+ return
62
+ }
63
+
64
+ // 从指定位置写入,写满数组后覆盖写头部,达到ring效果
65
+ func (rb * RingBuf ) WriteAt (p []byte , offset int64 ) (n int , err error ) {
66
+ if offset + int64 (len (p )) > rb .end || offset < rb .begin {
67
+ err = ErrOutOfRange
68
+ return
69
+ }
70
+ var writeOff int
71
+ if rb .end - rb .begin < int64 (len (rb .data )) {
72
+ writeOff = int (offset - rb .begin )
73
+ } else {
74
+ writeOff = rb .index + int (offset - rb .begin )
75
+ }
76
+ if writeOff > len (rb .data ) {
77
+ writeOff -= len (rb .data )
78
+ }
79
+ writeEnd := writeOff + int (rb .end - offset )
80
+ if writeEnd <= len (rb .data ) {
81
+ n = copy (rb .data [writeOff :writeEnd ], p )
82
+ } else {
83
+ n = copy (rb .data [writeOff :], p )
84
+ if n < len (p ) {
85
+ n += copy (rb .data [:writeEnd - len (rb .data )], p [n :])
86
+ }
87
+ }
88
+ return
89
+ }
90
+
91
+ func (rb * RingBuf ) ReadAt (p []byte , offset int64 ) (n int , err error ) {
92
+ if offset > rb .end || offset < rb .begin {
93
+ err = ErrOutOfRange
94
+ return
95
+ }
96
+ var readOff int
97
+ if rb .end - rb .begin < int64 (len (rb .data )) {
98
+ readOff = int (offset - rb .begin )
99
+ } else {
100
+ readOff = rb .index + int (offset - rb .begin )
101
+ }
102
+ if readOff >= len (rb .data ) {
103
+ readOff -= len (rb .data )
104
+ }
105
+ readEnd := readOff + int (rb .end - offset )
106
+ if readEnd <= len (rb .data ) {
107
+ n = copy (p , rb .data [readOff :readEnd ])
108
+ } else {
109
+ n = copy (p , rb .data [readOff :])
110
+ if n < len (p ) {
111
+ n += copy (p [n :], rb .data [:readEnd - len (rb .data )])
112
+ }
113
+ }
114
+ if n < len (p ) {
115
+ err = io .EOF
116
+ }
117
+ return
118
+ }
119
+
120
// Evacuate moves the length bytes stored at virtual offset offset to the end
// of the virtual space (re-appending them) and returns their new virtual
// offset, or -1 when [offset, offset+length) does not lie fully inside the
// current window. In a cache this keeps a recently-used entry from being
// clobbered by subsequent writes.
func (rb *RingBuf) Evacuate(offset int64, length int) (newOff int64) {
	if offset+int64(length) > rb.end || offset < rb.begin {
		return -1
	}
	// Map the virtual offset to a position in the backing array: before the
	// first wrap begin lives at data[0]; once full, the oldest byte lives at
	// data[index].
	var readOff int
	if rb.end-rb.begin < int64(len(rb.data)) {
		readOff = int(offset - rb.begin)
	} else {
		readOff = rb.index + int(offset-rb.begin)
	}
	if readOff >= len(rb.data) {
		readOff -= len(rb.data)
	}

	if readOff == rb.index {
		// The range already starts at the write position: no bytes need to
		// move, just advance the write index past it (no-copy evacuate).
		rb.index += length
		if rb.index >= len(rb.data) {
			rb.index -= len(rb.data)
		}
	} else if readOff < rb.index {
		// Source sits before the write position: the source itself cannot
		// wrap, but the destination may hit the end of the array.
		var n = copy(rb.data[rb.index:], rb.data[readOff:readOff+length])
		rb.index += n
		if rb.index == len(rb.data) {
			// Destination wrapped: copy the remainder to the array head.
			rb.index = copy(rb.data, rb.data[readOff+n:readOff+length])
		}
	} else {
		// Source sits after the write position and may itself wrap around the
		// end of the array, so copy in up to two segments.
		var readEnd = readOff + length
		var n int
		if readEnd <= len(rb.data) {
			n = copy(rb.data[rb.index:], rb.data[readOff:readEnd])
			rb.index += n
		} else {
			n = copy(rb.data[rb.index:], rb.data[readOff:])
			rb.index += n
			var tail = length - n
			n = copy(rb.data[rb.index:], rb.data[:tail])
			rb.index += n
			if rb.index == len(rb.data) {
				rb.index = copy(rb.data, rb.data[n:tail])
			}
		}
	}
	// The moved bytes now logically live at the old end of the virtual space.
	newOff = rb.end
	rb.end += int64(length)
	if rb.begin < rb.end-int64(len(rb.data)) {
		rb.begin = rb.end - int64(len(rb.data))
	}
	return
}
170
+
171
+ func (rb * RingBuf ) Resize (newSize int ) {
172
+ if len (rb .data ) == newSize {
173
+ return
174
+ }
175
+ newData := make ([]byte , newSize )
176
+ var offset int
177
+ if rb .end - rb .begin == int64 (len (rb .data )) {
178
+ offset = rb .index
179
+ }
180
+ if int (rb .end - rb .begin ) > newSize {
181
+ discard := int (rb .end - rb .begin ) - newSize
182
+ offset = (offset + discard ) % len (rb .data )
183
+ rb .begin = rb .end - int64 (newSize )
184
+ }
185
+ n := copy (newData , rb .data [offset :])
186
+ if n < newSize {
187
+ copy (newData [n :], rb .data [:offset ])
188
+ }
189
+ rb .data = newData
190
+ rb .index = 0
191
+ }
192
+
193
+ func (rb * RingBuf ) Skip (length int64 ) {
194
+ rb .end += length
195
+ rb .index += int (length )
196
+ for rb .index >= len (rb .data ) {
197
+ rb .index -= len (rb .data )
198
+ }
199
+ if int (rb .end - rb .begin ) > len (rb .data ) {
200
+ rb .begin = rb .end - int64 (len (rb .data ))
201
+ }
202
+ }
0 commit comments