
Commit f74277a

Graden Rea authored and committed
Add examples
1 parent 09198f1 commit f74277a

File tree

3 files changed (+180, -0 lines)

examples/chat_completion.js (+58 lines)
@@ -0,0 +1,58 @@
const Groq = require('groq');

const groq = new Groq();

async function main() {
    groq.chat.completions.create({
        //
        // Required parameters
        //
        messages: [
            // Set an optional system message. This sets the behavior of the
            // assistant and can be used to provide specific instructions for
            // how it should behave throughout the conversation.
            {
                "role": "system",
                "content": "you are a helpful assistant."
            },
            // Set a user message for the assistant to respond to.
            {
                "role": "user",
                "content": "Explain the importance of low latency LLMs",
            }
        ],

        // The language model which will generate the completion.
        model: "mixtral-8x7b-32768",

        //
        // Optional parameters
        //

        // Controls randomness: lowering results in less random completions.
        // As the temperature approaches zero, the model will become deterministic
        // and repetitive.
        temperature: 0.5,

        // The maximum number of tokens to generate. Requests can use up to
        // 2048 tokens shared between prompt and completion.
        max_tokens: 1024,

        // Controls diversity via nucleus sampling: 0.5 means half of all
        // likelihood-weighted options are considered.
        top_p: 1,

        // A stop sequence is a predefined or user-specified text string that
        // signals an AI to stop generating content, ensuring its responses
        // remain focused and concise. Examples include punctuation marks and
        // markers like "[end]".
        stop: null,

        // If set, partial message deltas will be sent.
        stream: false,
    }).then((chatCompletion) => {
        process.stdout.write(chatCompletion.choices[0]?.message?.content || '');
    })
}

main();
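For comparison, the same request can be awaited directly instead of chained with .then, as the two streaming examples below do. A minimal sketch, assuming the client reads an API key from a GROQ_API_KEY environment variable and that the response carries an OpenAI-style usage object (neither detail is shown in this diff; the run name is illustrative):

const Groq = require('groq');

// Assumes GROQ_API_KEY is set in the environment.
const groq = new Groq();

async function run() {
    const chatCompletion = await groq.chat.completions.create({
        messages: [
            { "role": "user", "content": "Explain the importance of low latency LLMs" }
        ],
        model: "mixtral-8x7b-32768",
    });

    // The generated text lives on the first choice, as in the example above.
    console.log(chatCompletion.choices[0]?.message?.content || '');

    // Token accounting, if the API returns it (assumed OpenAI-compatible shape).
    if (chatCompletion.usage) {
        console.log(`prompt tokens: ${chatCompletion.usage.prompt_tokens}`);
        console.log(`completion tokens: ${chatCompletion.usage.completion_tokens}`);
    }
}

run().catch(console.error);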

examples/chat_completion_stop.js (+63 lines)
@@ -0,0 +1,63 @@
const Groq = require('groq');

const groq = new Groq();

async function main() {
    const stream = await groq.chat.completions.create({
        //
        // Required parameters
        //
        messages: [
            // Set an optional system message. This sets the behavior of the
            // assistant and can be used to provide specific instructions for
            // how it should behave throughout the conversation.
            {
                "role": "system",
                "content": "you are a helpful assistant."
            },
            // Set a user message for the assistant to respond to.
            {
                "role": "user",
                "content": "Start at 1 and count to 10. Separate each number with a comma and a space"
            }
        ],

        // The language model which will generate the completion.
        model: "mixtral-8x7b-32768",

        //
        // Optional parameters
        //

        // Controls randomness: lowering results in less random completions.
        // As the temperature approaches zero, the model will become deterministic
        // and repetitive.
        temperature: 0.5,

        // The maximum number of tokens to generate. Requests can use up to
        // 2048 tokens shared between prompt and completion.
        max_tokens: 1024,

        // Controls diversity via nucleus sampling: 0.5 means half of all
        // likelihood-weighted options are considered.
        top_p: 1,

        // A stop sequence is a predefined or user-specified text string that
        // signals an AI to stop generating content, ensuring its responses
        // remain focused and concise. Examples include punctuation marks and
        // markers like "[end]".
        //
        // For this example, we will use ", 6" so that the LLM stops counting at 5.
        // If multiple stop values are needed, an array of strings may be passed,
        // stop: [", 6", ", six", ", Six"]
        stop: ", 6",

        // If set, partial message deltas will be sent.
        stream: true,
    });
    for await (const chunk of stream) {
        process.stdout.write(chunk.choices[0]?.delta?.content || '');
    }
}

main();
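The comment above notes that multiple stop values may be passed as an array. A minimal sketch of that variant, reusing the counting prompt from this file (the three spellings of "six" come from the comment and are illustrative, not exhaustive; the countToFive name is assumed):

const Groq = require('groq');

// Assumes GROQ_API_KEY is set in the environment.
const groq = new Groq();

async function countToFive() {
    const chatCompletion = await groq.chat.completions.create({
        messages: [
            {
                "role": "user",
                "content": "Start at 1 and count to 10. Separate each number with a comma and a space"
            }
        ],
        model: "mixtral-8x7b-32768",

        // Whichever of these sequences appears first ends generation, so the
        // count stops at 5 whether the model writes "6", "six", or "Six".
        stop: [", 6", ", six", ", Six"],
    });
    process.stdout.write(chatCompletion.choices[0]?.message?.content || '');
}

countToFive().catch(console.error);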

examples/chat_completion_streaming.js (+59 lines)
@@ -0,0 +1,59 @@
const Groq = require('groq');

const groq = new Groq();

async function main() {
    const stream = await groq.chat.completions.create({
        //
        // Required parameters
        //
        messages: [
            // Set an optional system message. This sets the behavior of the
            // assistant and can be used to provide specific instructions for
            // how it should behave throughout the conversation.
            {
                "role": "system",
                "content": "you are a helpful assistant."
            },
            // Set a user message for the assistant to respond to.
            {
                "role": "user",
                "content": "Explain the importance of low latency LLMs",
            }
        ],

        // The language model which will generate the completion.
        model: "mixtral-8x7b-32768",

        //
        // Optional parameters
        //

        // Controls randomness: lowering results in less random completions.
        // As the temperature approaches zero, the model will become deterministic
        // and repetitive.
        temperature: 0.5,

        // The maximum number of tokens to generate. Requests can use up to
        // 2048 tokens shared between prompt and completion.
        max_tokens: 1024,

        // Controls diversity via nucleus sampling: 0.5 means half of all
        // likelihood-weighted options are considered.
        top_p: 1,

        // A stop sequence is a predefined or user-specified text string that
        // signals an AI to stop generating content, ensuring its responses
        // remain focused and concise. Examples include punctuation marks and
        // markers like "[end]".
        stop: null,

        // If set, partial message deltas will be sent.
        stream: true,
    });
    for await (const chunk of stream) {
        process.stdout.write(chunk.choices[0]?.delta?.content || '');
    }
}

main();
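A common extension of this pattern is to accumulate the streamed deltas into one string while echoing them, so the full reply is available once the stream ends. A minimal sketch under the same assumptions as the example above (the collect name is illustrative):

const Groq = require('groq');

// Assumes GROQ_API_KEY is set in the environment.
const groq = new Groq();

async function collect() {
    const stream = await groq.chat.completions.create({
        messages: [
            { "role": "user", "content": "Explain the importance of low latency LLMs" }
        ],
        model: "mixtral-8x7b-32768",
        stream: true,
    });

    let fullReply = '';
    for await (const chunk of stream) {
        const delta = chunk.choices[0]?.delta?.content || '';
        fullReply += delta;           // keep a running copy of the whole message
        process.stdout.write(delta);  // echo tokens as they arrive
    }

    // The complete message, assembled from the deltas.
    console.log(`\n\n[${fullReply.length} characters streamed]`);
}

collect().catch(console.error);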
