Skip to content

Commit 0664105

Browse files
authored
lint: fix linter warnings reported by golangci-lint (#522)
- Fix #519
1 parent 9e0232f commit 0664105

23 files changed

+425
-431
lines changed

api_integration_test.go

-1
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@ import (
99
"os"
1010
"testing"
1111

12-
. "github.com/sashabaranov/go-openai"
1312
"github.com/sashabaranov/go-openai/internal/test/checks"
1413
"github.com/sashabaranov/go-openai/jsonschema"
1514
)

audio_api_test.go

+7-7
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ import (
1212
"strings"
1313
"testing"
1414

15-
. "github.com/sashabaranov/go-openai"
15+
"github.com/sashabaranov/go-openai"
1616
"github.com/sashabaranov/go-openai/internal/test"
1717
"github.com/sashabaranov/go-openai/internal/test/checks"
1818
)
@@ -26,7 +26,7 @@ func TestAudio(t *testing.T) {
2626

2727
testcases := []struct {
2828
name string
29-
createFn func(context.Context, AudioRequest) (AudioResponse, error)
29+
createFn func(context.Context, openai.AudioRequest) (openai.AudioResponse, error)
3030
}{
3131
{
3232
"transcribe",
@@ -48,7 +48,7 @@ func TestAudio(t *testing.T) {
4848
path := filepath.Join(dir, "fake.mp3")
4949
test.CreateTestFile(t, path)
5050

51-
req := AudioRequest{
51+
req := openai.AudioRequest{
5252
FilePath: path,
5353
Model: "whisper-3",
5454
}
@@ -57,7 +57,7 @@ func TestAudio(t *testing.T) {
5757
})
5858

5959
t.Run(tc.name+" (with reader)", func(t *testing.T) {
60-
req := AudioRequest{
60+
req := openai.AudioRequest{
6161
FilePath: "fake.webm",
6262
Reader: bytes.NewBuffer([]byte(`some webm binary data`)),
6363
Model: "whisper-3",
@@ -76,7 +76,7 @@ func TestAudioWithOptionalArgs(t *testing.T) {
7676

7777
testcases := []struct {
7878
name string
79-
createFn func(context.Context, AudioRequest) (AudioResponse, error)
79+
createFn func(context.Context, openai.AudioRequest) (openai.AudioResponse, error)
8080
}{
8181
{
8282
"transcribe",
@@ -98,13 +98,13 @@ func TestAudioWithOptionalArgs(t *testing.T) {
9898
path := filepath.Join(dir, "fake.mp3")
9999
test.CreateTestFile(t, path)
100100

101-
req := AudioRequest{
101+
req := openai.AudioRequest{
102102
FilePath: path,
103103
Model: "whisper-3",
104104
Prompt: "用简体中文",
105105
Temperature: 0.5,
106106
Language: "zh",
107-
Format: AudioResponseFormatSRT,
107+
Format: openai.AudioResponseFormatSRT,
108108
}
109109
_, err := tc.createFn(ctx, req)
110110
checks.NoError(t, err, "audio API error")

audio_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ func TestAudioWithFailingFormBuilder(t *testing.T) {
4040
}
4141

4242
var failForField string
43-
mockBuilder.mockWriteField = func(fieldname, value string) error {
43+
mockBuilder.mockWriteField = func(fieldname, _ string) error {
4444
if fieldname == failForField {
4545
return mockFailedErr
4646
}

chat_stream_test.go

+55-55
Original file line numberDiff line numberDiff line change
@@ -10,36 +10,36 @@ import (
1010
"strconv"
1111
"testing"
1212

13-
. "github.com/sashabaranov/go-openai"
13+
"github.com/sashabaranov/go-openai"
1414
"github.com/sashabaranov/go-openai/internal/test/checks"
1515
)
1616

1717
func TestChatCompletionsStreamWrongModel(t *testing.T) {
18-
config := DefaultConfig("whatever")
18+
config := openai.DefaultConfig("whatever")
1919
config.BaseURL = "http://localhost/v1"
20-
client := NewClientWithConfig(config)
20+
client := openai.NewClientWithConfig(config)
2121
ctx := context.Background()
2222

23-
req := ChatCompletionRequest{
23+
req := openai.ChatCompletionRequest{
2424
MaxTokens: 5,
2525
Model: "ada",
26-
Messages: []ChatCompletionMessage{
26+
Messages: []openai.ChatCompletionMessage{
2727
{
28-
Role: ChatMessageRoleUser,
28+
Role: openai.ChatMessageRoleUser,
2929
Content: "Hello!",
3030
},
3131
},
3232
}
3333
_, err := client.CreateChatCompletionStream(ctx, req)
34-
if !errors.Is(err, ErrChatCompletionInvalidModel) {
34+
if !errors.Is(err, openai.ErrChatCompletionInvalidModel) {
3535
t.Fatalf("CreateChatCompletion should return ErrChatCompletionInvalidModel, but returned: %v", err)
3636
}
3737
}
3838

3939
func TestCreateChatCompletionStream(t *testing.T) {
4040
client, server, teardown := setupOpenAITestServer()
4141
defer teardown()
42-
server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
42+
server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
4343
w.Header().Set("Content-Type", "text/event-stream")
4444

4545
// Send test responses
@@ -61,12 +61,12 @@ func TestCreateChatCompletionStream(t *testing.T) {
6161
checks.NoError(t, err, "Write error")
6262
})
6363

64-
stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
64+
stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
6565
MaxTokens: 5,
66-
Model: GPT3Dot5Turbo,
67-
Messages: []ChatCompletionMessage{
66+
Model: openai.GPT3Dot5Turbo,
67+
Messages: []openai.ChatCompletionMessage{
6868
{
69-
Role: ChatMessageRoleUser,
69+
Role: openai.ChatMessageRoleUser,
7070
Content: "Hello!",
7171
},
7272
},
@@ -75,15 +75,15 @@ func TestCreateChatCompletionStream(t *testing.T) {
7575
checks.NoError(t, err, "CreateCompletionStream returned error")
7676
defer stream.Close()
7777

78-
expectedResponses := []ChatCompletionStreamResponse{
78+
expectedResponses := []openai.ChatCompletionStreamResponse{
7979
{
8080
ID: "1",
8181
Object: "completion",
8282
Created: 1598069254,
83-
Model: GPT3Dot5Turbo,
84-
Choices: []ChatCompletionStreamChoice{
83+
Model: openai.GPT3Dot5Turbo,
84+
Choices: []openai.ChatCompletionStreamChoice{
8585
{
86-
Delta: ChatCompletionStreamChoiceDelta{
86+
Delta: openai.ChatCompletionStreamChoiceDelta{
8787
Content: "response1",
8888
},
8989
FinishReason: "max_tokens",
@@ -94,10 +94,10 @@ func TestCreateChatCompletionStream(t *testing.T) {
9494
ID: "2",
9595
Object: "completion",
9696
Created: 1598069255,
97-
Model: GPT3Dot5Turbo,
98-
Choices: []ChatCompletionStreamChoice{
97+
Model: openai.GPT3Dot5Turbo,
98+
Choices: []openai.ChatCompletionStreamChoice{
9999
{
100-
Delta: ChatCompletionStreamChoiceDelta{
100+
Delta: openai.ChatCompletionStreamChoiceDelta{
101101
Content: "response2",
102102
},
103103
FinishReason: "max_tokens",
@@ -133,7 +133,7 @@ func TestCreateChatCompletionStream(t *testing.T) {
133133
func TestCreateChatCompletionStreamError(t *testing.T) {
134134
client, server, teardown := setupOpenAITestServer()
135135
defer teardown()
136-
server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
136+
server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
137137
w.Header().Set("Content-Type", "text/event-stream")
138138

139139
// Send test responses
@@ -156,12 +156,12 @@ func TestCreateChatCompletionStreamError(t *testing.T) {
156156
checks.NoError(t, err, "Write error")
157157
})
158158

159-
stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
159+
stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
160160
MaxTokens: 5,
161-
Model: GPT3Dot5Turbo,
162-
Messages: []ChatCompletionMessage{
161+
Model: openai.GPT3Dot5Turbo,
162+
Messages: []openai.ChatCompletionMessage{
163163
{
164-
Role: ChatMessageRoleUser,
164+
Role: openai.ChatMessageRoleUser,
165165
Content: "Hello!",
166166
},
167167
},
@@ -173,7 +173,7 @@ func TestCreateChatCompletionStreamError(t *testing.T) {
173173
_, streamErr := stream.Recv()
174174
checks.HasError(t, streamErr, "stream.Recv() did not return error")
175175

176-
var apiErr *APIError
176+
var apiErr *openai.APIError
177177
if !errors.As(streamErr, &apiErr) {
178178
t.Errorf("stream.Recv() did not return APIError")
179179
}
@@ -183,7 +183,7 @@ func TestCreateChatCompletionStreamError(t *testing.T) {
183183
func TestCreateChatCompletionStreamWithHeaders(t *testing.T) {
184184
client, server, teardown := setupOpenAITestServer()
185185
defer teardown()
186-
server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
186+
server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
187187
w.Header().Set("Content-Type", "text/event-stream")
188188
w.Header().Set(xCustomHeader, xCustomHeaderValue)
189189

@@ -196,12 +196,12 @@ func TestCreateChatCompletionStreamWithHeaders(t *testing.T) {
196196
checks.NoError(t, err, "Write error")
197197
})
198198

199-
stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
199+
stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
200200
MaxTokens: 5,
201-
Model: GPT3Dot5Turbo,
202-
Messages: []ChatCompletionMessage{
201+
Model: openai.GPT3Dot5Turbo,
202+
Messages: []openai.ChatCompletionMessage{
203203
{
204-
Role: ChatMessageRoleUser,
204+
Role: openai.ChatMessageRoleUser,
205205
Content: "Hello!",
206206
},
207207
},
@@ -219,7 +219,7 @@ func TestCreateChatCompletionStreamWithHeaders(t *testing.T) {
219219
func TestCreateChatCompletionStreamWithRatelimitHeaders(t *testing.T) {
220220
client, server, teardown := setupOpenAITestServer()
221221
defer teardown()
222-
server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
222+
server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
223223
w.Header().Set("Content-Type", "text/event-stream")
224224
for k, v := range rateLimitHeaders {
225225
switch val := v.(type) {
@@ -239,12 +239,12 @@ func TestCreateChatCompletionStreamWithRatelimitHeaders(t *testing.T) {
239239
checks.NoError(t, err, "Write error")
240240
})
241241

242-
stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
242+
stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
243243
MaxTokens: 5,
244-
Model: GPT3Dot5Turbo,
245-
Messages: []ChatCompletionMessage{
244+
Model: openai.GPT3Dot5Turbo,
245+
Messages: []openai.ChatCompletionMessage{
246246
{
247-
Role: ChatMessageRoleUser,
247+
Role: openai.ChatMessageRoleUser,
248248
Content: "Hello!",
249249
},
250250
},
@@ -264,7 +264,7 @@ func TestCreateChatCompletionStreamWithRatelimitHeaders(t *testing.T) {
264264
func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
265265
client, server, teardown := setupOpenAITestServer()
266266
defer teardown()
267-
server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
267+
server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
268268
w.Header().Set("Content-Type", "text/event-stream")
269269

270270
// Send test responses
@@ -276,12 +276,12 @@ func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
276276
checks.NoError(t, err, "Write error")
277277
})
278278

279-
stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
279+
stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
280280
MaxTokens: 5,
281-
Model: GPT3Dot5Turbo,
282-
Messages: []ChatCompletionMessage{
281+
Model: openai.GPT3Dot5Turbo,
282+
Messages: []openai.ChatCompletionMessage{
283283
{
284-
Role: ChatMessageRoleUser,
284+
Role: openai.ChatMessageRoleUser,
285285
Content: "Hello!",
286286
},
287287
},
@@ -293,7 +293,7 @@ func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
293293
_, streamErr := stream.Recv()
294294
checks.HasError(t, streamErr, "stream.Recv() did not return error")
295295

296-
var apiErr *APIError
296+
var apiErr *openai.APIError
297297
if !errors.As(streamErr, &apiErr) {
298298
t.Errorf("stream.Recv() did not return APIError")
299299
}
@@ -303,7 +303,7 @@ func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
303303
func TestCreateChatCompletionStreamRateLimitError(t *testing.T) {
304304
client, server, teardown := setupOpenAITestServer()
305305
defer teardown()
306-
server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
306+
server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
307307
w.Header().Set("Content-Type", "application/json")
308308
w.WriteHeader(429)
309309

@@ -317,18 +317,18 @@ func TestCreateChatCompletionStreamRateLimitError(t *testing.T) {
317317
_, err := w.Write(dataBytes)
318318
checks.NoError(t, err, "Write error")
319319
})
320-
_, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
320+
_, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
321321
MaxTokens: 5,
322-
Model: GPT3Dot5Turbo,
323-
Messages: []ChatCompletionMessage{
322+
Model: openai.GPT3Dot5Turbo,
323+
Messages: []openai.ChatCompletionMessage{
324324
{
325-
Role: ChatMessageRoleUser,
325+
Role: openai.ChatMessageRoleUser,
326326
Content: "Hello!",
327327
},
328328
},
329329
Stream: true,
330330
})
331-
var apiErr *APIError
331+
var apiErr *openai.APIError
332332
if !errors.As(err, &apiErr) {
333333
t.Errorf("TestCreateChatCompletionStreamRateLimitError did not return APIError")
334334
}
@@ -345,7 +345,7 @@ func TestAzureCreateChatCompletionStreamRateLimitError(t *testing.T) {
345345
client, server, teardown := setupAzureTestServer()
346346
defer teardown()
347347
server.RegisterHandler("/openai/deployments/gpt-35-turbo/chat/completions",
348-
func(w http.ResponseWriter, r *http.Request) {
348+
func(w http.ResponseWriter, _ *http.Request) {
349349
w.Header().Set("Content-Type", "application/json")
350350
w.WriteHeader(http.StatusTooManyRequests)
351351
// Send test responses
@@ -355,13 +355,13 @@ func TestAzureCreateChatCompletionStreamRateLimitError(t *testing.T) {
355355
checks.NoError(t, err, "Write error")
356356
})
357357

358-
apiErr := &APIError{}
359-
_, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
358+
apiErr := &openai.APIError{}
359+
_, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
360360
MaxTokens: 5,
361-
Model: GPT3Dot5Turbo,
362-
Messages: []ChatCompletionMessage{
361+
Model: openai.GPT3Dot5Turbo,
362+
Messages: []openai.ChatCompletionMessage{
363363
{
364-
Role: ChatMessageRoleUser,
364+
Role: openai.ChatMessageRoleUser,
365365
Content: "Hello!",
366366
},
367367
},
@@ -387,7 +387,7 @@ func TestAzureCreateChatCompletionStreamRateLimitError(t *testing.T) {
387387
}
388388

389389
// Helper funcs.
390-
func compareChatResponses(r1, r2 ChatCompletionStreamResponse) bool {
390+
func compareChatResponses(r1, r2 openai.ChatCompletionStreamResponse) bool {
391391
if r1.ID != r2.ID || r1.Object != r2.Object || r1.Created != r2.Created || r1.Model != r2.Model {
392392
return false
393393
}
@@ -402,7 +402,7 @@ func compareChatResponses(r1, r2 ChatCompletionStreamResponse) bool {
402402
return true
403403
}
404404

405-
func compareChatStreamResponseChoices(c1, c2 ChatCompletionStreamChoice) bool {
405+
func compareChatStreamResponseChoices(c1, c2 openai.ChatCompletionStreamChoice) bool {
406406
if c1.Index != c2.Index {
407407
return false
408408
}

0 commit comments

Comments (0)