# Groq Node API Library
-[![NPM version](https://img.shields.io/npm/v/groq-sdk.svg)](https://npmjs.org/package/groq-sdk)
+[![NPM version](https://img.shields.io/npm/v/groq-sdk.svg)](https://npmjs.org/package/groq-sdk) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/groq-sdk)

This library provides convenient access to the Groq REST API from server-side TypeScript or JavaScript.

@@ -22,12 +22,14 @@ The full API of this library can be found in [api.md](api.md).
```js
import Groq from 'groq-sdk';

-const groq = new Groq();
+const groq = new Groq({
+  apiKey: process.env['GROQ_API_KEY'], // This is the default and can be omitted
+});

async function main() {
  const chatCompletion = await groq.chat.completions.create({
    messages: [{ role: 'user', content: 'Explain the importance of low latency LLMs' }],
-    model: 'mixtral-8x7b-32768',
+    model: 'llama3-8b-8192',
  });

  console.log(chatCompletion.choices[0].message.content);
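
Assembled, the patched quickstart runs end to end as below; a minimal sketch assuming `GROQ_API_KEY` is set in the environment and `groq-sdk` is installed (the trailing `main()` call is added here for runnability and is not part of the diff):

```ts
import Groq from 'groq-sdk';

const groq = new Groq({
  apiKey: process.env['GROQ_API_KEY'], // This is the default and can be omitted
});

async function main() {
  const chatCompletion = await groq.chat.completions.create({
    messages: [{ role: 'user', content: 'Explain the importance of low latency LLMs' }],
    model: 'llama3-8b-8192', // model name updated by this change
  });

  console.log(chatCompletion.choices[0].message.content);
}

main();
```
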
@@ -44,15 +46,17 @@ This library includes TypeScript definitions for all request params and response
```ts
import Groq from 'groq-sdk';

-const groq = new Groq();
+const groq = new Groq({
+  apiKey: process.env['GROQ_API_KEY'], // This is the default and can be omitted
+});

async function main() {
  const params: Groq.Chat.CompletionCreateParams = {
    messages: [
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'Explain the importance of low latency LLMs' },
    ],
-    model: 'mixtral-8x7b-32768',
+    model: 'llama3-8b-8192',
  };
  const chatCompletion: Groq.Chat.ChatCompletion = await groq.chat.completions.create(params);
}
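
Since the hunk above shows `Groq.Chat.CompletionCreateParams` and `Groq.Chat.ChatCompletion` in use, the same exported types can annotate small wrappers; a sketch under that assumption (the `ask` helper is hypothetical, not part of the SDK):

```ts
import Groq from 'groq-sdk';

const groq = new Groq(); // picks up GROQ_API_KEY from the environment by default

// Hypothetical helper that pins the model in one place while keeping call sites typed.
async function ask(prompt: string): Promise<Groq.Chat.ChatCompletion> {
  const params: Groq.Chat.CompletionCreateParams = {
    messages: [{ role: 'user', content: prompt }],
    model: 'llama3-8b-8192',
  };
  return groq.chat.completions.create(params);
}
```
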
@@ -77,7 +81,7 @@ async function main() {
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'Explain the importance of low latency LLMs' },
      ],
-      model: 'mixtral-8x7b-32768',
+      model: 'llama3-8b-8192',
    })
    .catch(async (err) => {
      if (err instanceof Groq.APIError) {
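
For context, here is the full shape the `catch` branch typically takes; a sketch assuming `Groq.APIError` exposes `status`, `name`, and `headers`, which this hunk does not show:

```ts
import Groq from 'groq-sdk';

const groq = new Groq();

async function main() {
  const chatCompletion = await groq.chat.completions
    .create({
      messages: [{ role: 'user', content: 'Explain the importance of low latency LLMs' }],
      model: 'llama3-8b-8192',
    })
    .catch(async (err) => {
      if (err instanceof Groq.APIError) {
        // Assumed fields, following the SDK's error convention:
        console.log(err.status); // e.g. 400
        console.log(err.name); // e.g. 'BadRequestError'
        console.log(err.headers); // response headers, if any
      } else {
        throw err;
      }
    });
}
```
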
@@ -122,7 +126,7 @@ const groq = new Groq({
});

// Or, configure per-request:
-await groq.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'mixtral-8x7b-32768' }, {
+await groq.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'llama3-8b-8192' }, {
  maxRetries: 5,
});
```
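
The hunk header (`const groq = new Groq({`) indicates retries can also be configured client-wide; a sketch under the assumption that the constructor accepts the same `maxRetries` option as the per-request form above:

```ts
import Groq from 'groq-sdk';

// Default for every request made by this client; per-request options still override it.
const groq = new Groq({
  maxRetries: 0, // e.g. disable automatic retries entirely
});
```
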
@@ -139,7 +143,7 @@ const groq = new Groq({
});

// Override per-request:
-await groq.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'mixtral-8x7b-32768' }, {
+await groq.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'llama3-8b-8192' }, {
  timeout: 5 * 1000,
});
```
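
Likewise for timeouts; a sketch assuming the constructor accepts a client-wide `timeout` in milliseconds, mirroring the per-request option above:

```ts
import Groq from 'groq-sdk';

// Applies to every request unless a per-request timeout overrides it.
const groq = new Groq({
  timeout: 20 * 1000, // 20 seconds, in milliseconds
});
```
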
@@ -166,7 +170,7 @@ const response = await groq.chat.completions
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'Explain the importance of low latency LLMs' },
    ],
-    model: 'mixtral-8x7b-32768',
+    model: 'llama3-8b-8192',
  })
  .asResponse();
console.log(response.headers.get('X-My-Header'));
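
Because `.asResponse()` returns the underlying fetch `Response`, the standard `Response` API applies; a brief sketch:

```ts
import Groq from 'groq-sdk';

const groq = new Groq();

async function main() {
  const response = await groq.chat.completions
    .create({
      messages: [{ role: 'user', content: 'Explain the importance of low latency LLMs' }],
      model: 'llama3-8b-8192',
    })
    .asResponse();

  // Standard fetch Response members:
  console.log(response.status);
  console.log(response.headers.get('content-type'));
}
```
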
@@ -178,7 +182,7 @@ const { data: chatCompletion, response: raw } = await groq.chat.completions
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'Explain the importance of low latency LLMs' },
    ],
-    model: 'mixtral-8x7b-32768',
+    model: 'llama3-8b-8192',
  })
  .withResponse();
console.log(raw.headers.get('X-My-Header'));
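
`.withResponse()` avoids having to choose between the parsed body and the raw `Response`; a brief usage sketch:

```ts
import Groq from 'groq-sdk';

const groq = new Groq();

async function main() {
  const { data: chatCompletion, response: raw } = await groq.chat.completions
    .create({
      messages: [{ role: 'user', content: 'Explain the importance of low latency LLMs' }],
      model: 'llama3-8b-8192',
    })
    .withResponse();

  console.log(raw.status); // raw fetch Response
  console.log(chatCompletion.choices[0].message.content); // parsed completion
}
```
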
@@ -292,7 +296,7 @@ await groq.chat.completions.create(
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'Explain the importance of low latency LLMs' },
    ],
-    model: 'mixtral-8x7b-32768',
+    model: 'llama3-8b-8192',
  },
  {
    httpAgent: new http.Agent({ keepAlive: false }),
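
An agent can also be installed once on the client instead of per request; a sketch assuming the constructor accepts `httpAgent` the same way the request options in this hunk do:

```ts
import http from 'node:http';
import Groq from 'groq-sdk';

// Assumed: client-wide agent, overridable per request as shown above.
const groq = new Groq({
  httpAgent: new http.Agent({ keepAlive: false }),
});
```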