Skip to content

Commit 69feb04

Browse files
committed
simple llamaindex example, fix module
1 parent 03c128b commit 69feb04

File tree

14 files changed

+48
-98
lines changed

14 files changed

+48
-98
lines changed

examples/express-together-llamaindex/readme.md

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,9 +36,13 @@ The server should be up at http://localhost:8000
3636
## Send a test request to the chatCompletionBasic workflow
3737

3838
```
39-
curl -X POST http://localhost:8000 -H "Content-Type: application/json" -d '{"workflowName": "chatCompletionBasic", "workflowId": "chatCompletionBasic-123", "name": "John"}'
39+
curl -X POST http://localhost:8000 -H "Content-Type: application/json" -d '{"workflowName": "chatCompletionBasic", "workflowId": "chatCompletionBasic-123", "input": {"name": "Martin"}}'
4040
```
4141

42+
## Test the simple LlamaIndex chat workflow
43+
44+
curl -X POST http://localhost:8000 -H "Content-Type: application/json" -d '{"workflowName": "llamaindexTogetherSimple", "workflowId": "llamaindexTogetherSimple-123"}'
45+
4246
## Build and Run Docker Container
4347

4448
Build the Docker image and run the container with:

examples/express-together-llamaindex/src/functions/llamaindex/createDocument.ts

Lines changed: 0 additions & 7 deletions
This file was deleted.

examples/express-together-llamaindex/src/functions/llamaindex/createIndex.ts

Lines changed: 0 additions & 5 deletions
This file was deleted.
Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1 @@
1-
export * from './createDocument';
2-
export * from './createIndex';
3-
export * from './loadEssay';
4-
export * from './outputResponse';
5-
export * from './queryIndex';
1+
export * from './queryTogether';

examples/express-together-llamaindex/src/functions/llamaindex/loadEssay.ts

Lines changed: 0 additions & 5 deletions
This file was deleted.

examples/express-together-llamaindex/src/functions/llamaindex/outputResponse.ts

Lines changed: 0 additions & 15 deletions
This file was deleted.

examples/express-together-llamaindex/src/functions/llamaindex/queryIndex.ts

Lines changed: 0 additions & 13 deletions
This file was deleted.
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
import { llamaIndexTogetherClient } from "./utils/llamaIndexTogetherClient";
2+
import { TogetherLLM } from "llamaindex";
3+
4+
export async function llamaIndexQueryTogether({ query, model }: { query: string, model: TogetherLLM["model"] }) {
5+
const client = llamaIndexTogetherClient({ model });
6+
7+
const response = await client.chat({
8+
messages: [{ role: "user", content: query }],
9+
});
10+
11+
return response;
12+
}
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
import { TogetherLLM, Settings } from "llamaindex";
2+
3+
export function llamaIndexTogetherClient({ model }: { model: TogetherLLM["model"] }) {
4+
Settings.llm = new TogetherLLM({
5+
apiKey: process.env.TOGETHER_API_KEY,
6+
model: model,
7+
});
8+
return Settings.llm;
9+
}

examples/express-together-llamaindex/src/services.ts

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
// Simple example to start two services in the same file
2-
import { togetherChatCompletionBasic } from "./functions";
2+
import { togetherChatCompletionBasic, llamaIndexQueryTogether } from "./functions";
33
import { client } from "./client";
44

55
export async function services() {
@@ -10,14 +10,15 @@ export async function services() {
1010
client.startService({
1111
workflowsPath,
1212
functions: {
13+
1314
// add other functions here
1415
},
1516
}),
1617
// Start the together service to queue function calls to the Together API with rate limiting
1718
// https://docs.together.ai/docs/rate-limits
1819
client.startService({
1920
taskQueue: 'together',
20-
functions: { togetherChatCompletionBasic },
21+
functions: { togetherChatCompletionBasic, llamaIndexQueryTogether },
2122
options: {
2223
rateLimit: (60 / 60), // 60 RPM -> 1 RPS
2324
},

0 commit comments

Comments
 (0)