@@ -10,36 +10,36 @@ import (
 	"strconv"
 	"testing"
 
-	. "github.com/sashabaranov/go-openai"
+	"github.com/sashabaranov/go-openai"
 	"github.com/sashabaranov/go-openai/internal/test/checks"
 )
 
 func TestChatCompletionsStreamWrongModel(t *testing.T) {
-	config := DefaultConfig("whatever")
+	config := openai.DefaultConfig("whatever")
 	config.BaseURL = "http://localhost/v1"
-	client := NewClientWithConfig(config)
+	client := openai.NewClientWithConfig(config)
 	ctx := context.Background()
 
-	req := ChatCompletionRequest{
+	req := openai.ChatCompletionRequest{
 		MaxTokens: 5,
 		Model:     "ada",
-		Messages: []ChatCompletionMessage{
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
 	}
 	_, err := client.CreateChatCompletionStream(ctx, req)
-	if !errors.Is(err, ErrChatCompletionInvalidModel) {
+	if !errors.Is(err, openai.ErrChatCompletionInvalidModel) {
 		t.Fatalf("CreateChatCompletion should return ErrChatCompletionInvalidModel, but returned: %v", err)
 	}
 }
 
 func TestCreateChatCompletionStream(t *testing.T) {
 	client, server, teardown := setupOpenAITestServer()
 	defer teardown()
-	server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
+	server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
 		w.Header().Set("Content-Type", "text/event-stream")
 
 		// Send test responses
@@ -61,12 +61,12 @@ func TestCreateChatCompletionStream(t *testing.T) {
 		checks.NoError(t, err, "Write error")
 	})
 
-	stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+	stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
 		MaxTokens: 5,
-		Model:     GPT3Dot5Turbo,
-		Messages: []ChatCompletionMessage{
+		Model:     openai.GPT3Dot5Turbo,
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
@@ -75,15 +75,15 @@ func TestCreateChatCompletionStream(t *testing.T) {
 	checks.NoError(t, err, "CreateCompletionStream returned error")
 	defer stream.Close()
 
-	expectedResponses := []ChatCompletionStreamResponse{
+	expectedResponses := []openai.ChatCompletionStreamResponse{
 		{
 			ID:      "1",
 			Object:  "completion",
 			Created: 1598069254,
-			Model:   GPT3Dot5Turbo,
-			Choices: []ChatCompletionStreamChoice{
+			Model:   openai.GPT3Dot5Turbo,
+			Choices: []openai.ChatCompletionStreamChoice{
 				{
-					Delta: ChatCompletionStreamChoiceDelta{
+					Delta: openai.ChatCompletionStreamChoiceDelta{
 						Content: "response1",
 					},
 					FinishReason: "max_tokens",
@@ -94,10 +94,10 @@ func TestCreateChatCompletionStream(t *testing.T) {
 			ID:      "2",
 			Object:  "completion",
 			Created: 1598069255,
-			Model:   GPT3Dot5Turbo,
-			Choices: []ChatCompletionStreamChoice{
+			Model:   openai.GPT3Dot5Turbo,
+			Choices: []openai.ChatCompletionStreamChoice{
 				{
-					Delta: ChatCompletionStreamChoiceDelta{
+					Delta: openai.ChatCompletionStreamChoiceDelta{
 						Content: "response2",
 					},
 					FinishReason: "max_tokens",
@@ -133,7 +133,7 @@ func TestCreateChatCompletionStream(t *testing.T) {
 func TestCreateChatCompletionStreamError(t *testing.T) {
 	client, server, teardown := setupOpenAITestServer()
 	defer teardown()
-	server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
+	server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
 		w.Header().Set("Content-Type", "text/event-stream")
 
 		// Send test responses
@@ -156,12 +156,12 @@ func TestCreateChatCompletionStreamError(t *testing.T) {
 		checks.NoError(t, err, "Write error")
 	})
 
-	stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+	stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
 		MaxTokens: 5,
-		Model:     GPT3Dot5Turbo,
-		Messages: []ChatCompletionMessage{
+		Model:     openai.GPT3Dot5Turbo,
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
@@ -173,7 +173,7 @@ func TestCreateChatCompletionStreamError(t *testing.T) {
 	_, streamErr := stream.Recv()
 	checks.HasError(t, streamErr, "stream.Recv() did not return error")
 
-	var apiErr *APIError
+	var apiErr *openai.APIError
 	if !errors.As(streamErr, &apiErr) {
 		t.Errorf("stream.Recv() did not return APIError")
 	}
@@ -183,7 +183,7 @@ func TestCreateChatCompletionStreamWithHeaders(t *testing.T) {
 func TestCreateChatCompletionStreamWithHeaders(t *testing.T) {
 	client, server, teardown := setupOpenAITestServer()
 	defer teardown()
-	server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
+	server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
 		w.Header().Set("Content-Type", "text/event-stream")
 		w.Header().Set(xCustomHeader, xCustomHeaderValue)
 
@@ -196,12 +196,12 @@ func TestCreateChatCompletionStreamWithHeaders(t *testing.T) {
 		checks.NoError(t, err, "Write error")
 	})
 
-	stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+	stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
 		MaxTokens: 5,
-		Model:     GPT3Dot5Turbo,
-		Messages: []ChatCompletionMessage{
+		Model:     openai.GPT3Dot5Turbo,
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
@@ -219,7 +219,7 @@ func TestCreateChatCompletionStreamWithRatelimitHeaders(t *testing.T) {
 func TestCreateChatCompletionStreamWithRatelimitHeaders(t *testing.T) {
 	client, server, teardown := setupOpenAITestServer()
 	defer teardown()
-	server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
+	server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
 		w.Header().Set("Content-Type", "text/event-stream")
 		for k, v := range rateLimitHeaders {
 			switch val := v.(type) {
@@ -239,12 +239,12 @@ func TestCreateChatCompletionStreamWithRatelimitHeaders(t *testing.T) {
 		checks.NoError(t, err, "Write error")
 	})
 
-	stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+	stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
 		MaxTokens: 5,
-		Model:     GPT3Dot5Turbo,
-		Messages: []ChatCompletionMessage{
+		Model:     openai.GPT3Dot5Turbo,
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
@@ -264,7 +264,7 @@ func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
 func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
 	client, server, teardown := setupOpenAITestServer()
 	defer teardown()
-	server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
+	server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
 		w.Header().Set("Content-Type", "text/event-stream")
 
 		// Send test responses
@@ -276,12 +276,12 @@ func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
 		checks.NoError(t, err, "Write error")
 	})
 
-	stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+	stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
 		MaxTokens: 5,
-		Model:     GPT3Dot5Turbo,
-		Messages: []ChatCompletionMessage{
+		Model:     openai.GPT3Dot5Turbo,
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
@@ -293,7 +293,7 @@ func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
 	_, streamErr := stream.Recv()
 	checks.HasError(t, streamErr, "stream.Recv() did not return error")
 
-	var apiErr *APIError
+	var apiErr *openai.APIError
 	if !errors.As(streamErr, &apiErr) {
 		t.Errorf("stream.Recv() did not return APIError")
 	}
@@ -303,7 +303,7 @@ func TestCreateChatCompletionStreamErrorWithDataPrefix(t *testing.T) {
 func TestCreateChatCompletionStreamRateLimitError(t *testing.T) {
 	client, server, teardown := setupOpenAITestServer()
 	defer teardown()
-	server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, r *http.Request) {
+	server.RegisterHandler("/v1/chat/completions", func(w http.ResponseWriter, _ *http.Request) {
 		w.Header().Set("Content-Type", "application/json")
 		w.WriteHeader(429)
 
@@ -317,18 +317,18 @@ func TestCreateChatCompletionStreamRateLimitError(t *testing.T) {
 		_, err := w.Write(dataBytes)
 		checks.NoError(t, err, "Write error")
 	})
-	_, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+	_, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
 		MaxTokens: 5,
-		Model:     GPT3Dot5Turbo,
-		Messages: []ChatCompletionMessage{
+		Model:     openai.GPT3Dot5Turbo,
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
 		Stream: true,
 	})
-	var apiErr *APIError
+	var apiErr *openai.APIError
 	if !errors.As(err, &apiErr) {
 		t.Errorf("TestCreateChatCompletionStreamRateLimitError did not return APIError")
 	}
@@ -345,7 +345,7 @@ func TestAzureCreateChatCompletionStreamRateLimitError(t *testing.T) {
 	client, server, teardown := setupAzureTestServer()
 	defer teardown()
 	server.RegisterHandler("/openai/deployments/gpt-35-turbo/chat/completions",
-		func(w http.ResponseWriter, r *http.Request) {
+		func(w http.ResponseWriter, _ *http.Request) {
 			w.Header().Set("Content-Type", "application/json")
 			w.WriteHeader(http.StatusTooManyRequests)
 			// Send test responses
@@ -355,13 +355,13 @@ func TestAzureCreateChatCompletionStreamRateLimitError(t *testing.T) {
 			checks.NoError(t, err, "Write error")
 		})
 
-	apiErr := &APIError{}
-	_, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
+	apiErr := &openai.APIError{}
+	_, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
 		MaxTokens: 5,
-		Model:     GPT3Dot5Turbo,
-		Messages: []ChatCompletionMessage{
+		Model:     openai.GPT3Dot5Turbo,
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
@@ -387,7 +387,7 @@ func TestAzureCreateChatCompletionStreamRateLimitError(t *testing.T) {
 }
 
 // Helper funcs.
-func compareChatResponses(r1, r2 ChatCompletionStreamResponse) bool {
+func compareChatResponses(r1, r2 openai.ChatCompletionStreamResponse) bool {
 	if r1.ID != r2.ID || r1.Object != r2.Object || r1.Created != r2.Created || r1.Model != r2.Model {
 		return false
 	}
@@ -402,7 +402,7 @@ func compareChatResponses(r1, r2 ChatCompletionStreamResponse) bool {
 	return true
 }
 
-func compareChatStreamResponseChoices(c1, c2 ChatCompletionStreamChoice) bool {
+func compareChatStreamResponseChoices(c1, c2 openai.ChatCompletionStreamChoice) bool {
 	if c1.Index != c2.Index {
 		return false
 	}
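
For reference, a minimal sketch (not part of this diff) of how the qualified-import style adopted here reads at a call site. The token string is a placeholder, and the errors.As handling mirrors the pattern the tests above assert:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-token") // placeholder API key
	stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
		},
		Stream: true,
	})
	if err != nil {
		var apiErr *openai.APIError // same errors.As pattern as the tests
		if errors.As(err, &apiErr) {
			fmt.Println("API error:", apiErr.Message)
		}
		return
	}
	defer stream.Close()
	for {
		resp, recvErr := stream.Recv()
		if recvErr != nil { // io.EOF marks the end of the stream
			break
		}
		fmt.Print(resp.Choices[0].Delta.Content)
	}
}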