
Commit 6bd6f7b

update

1 parent de980b6

File tree

production_demo/README.md
production_demo/schedule_workflow.py
production_demo/src/functions/evaluate.py
production_demo/src/functions/generate.py
production_demo/src/workflows/child.py

5 files changed: +8 -8 lines changed

production_demo/README.md

Lines changed: 2 additions & 2 deletions

@@ -63,11 +63,11 @@ And for each child workflow, for each step you can see how long the function sta
 - Python 3.10 or higher
 - Poetry (for dependency management)
 - Docker (for running the Restack services)
-- Local LLM provider (we use LMStudio and a Meta Llama 3.1 8B Instruct 4bit model in this example)
+- Local LLM provider (we use LMStudio and a Meta Llama 3.2 3B Instruct 4bit model in this example)
 
 ## Start LM Studio for local LLM provider
 
-Start a local server with an open source model like llama-3.3-70b-instruct.
+Start a local server with an open source model like llama-3.2-3b-instruct.
 
 https://lmstudio.ai
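For context: once LM Studio is running, the demo reaches it through the OpenAI-compatible API. A minimal sketch, assuming LM Studio's default local endpoint at http://localhost:1234/v1 and the model name above (the api_key is a placeholder; the local server ignores it):

from openai import OpenAI

# LM Studio's default OpenAI-compatible endpoint; the key is a placeholder.
client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")

response = client.chat.completions.create(
    model="llama-3.2-3b-instruct",  # the model loaded in LM Studio
    messages=[{"role": "user", "content": "Say hello in five words."}],
)
print(response.choices[0].message.content)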

production_demo/schedule_workflow.py

Lines changed: 1 addition & 1 deletion

@@ -6,7 +6,7 @@ async def main():
 
     client = Restack()
 
-    workflow_id = f"{int(time.time() * 1000)}-ExampleWorkflow"
+    workflow_id = f"{int(time.time() * 1000)}-ChildWorkflow"
     run_id = await client.schedule_workflow(
         workflow_name="ChildWorkflow",
         workflow_id=workflow_id
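For reference, a sketch of the scheduling script as a whole after this rename. The restack_ai import path and the asyncio entry point are assumptions based on the rest of the demo, not shown in this diff:

import asyncio
import time

from restack_ai import Restack  # assumed import path


async def main():
    client = Restack()

    # The millisecond timestamp keeps ids unique across runs, and the suffix
    # now matches the workflow actually scheduled, so runs are easier to find.
    workflow_id = f"{int(time.time() * 1000)}-ChildWorkflow"
    run_id = await client.schedule_workflow(
        workflow_name="ChildWorkflow",
        workflow_id=workflow_id
    )
    print(run_id)


if __name__ == "__main__":
    asyncio.run(main())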

production_demo/src/functions/evaluate.py

Lines changed: 1 addition & 1 deletion

@@ -27,7 +27,7 @@ async def llm_evaluate(input: EvaluateInput) -> str:
 
     try:
         response = client.chat.completions.create(
-            model="llama-3.3-70b-instruct",
+            model="llama-3.2-3b-instruct",
             messages=[
                 {
                     "role": "user",

production_demo/src/functions/generate.py

Lines changed: 1 addition & 1 deletion

@@ -17,7 +17,7 @@ async def llm_generate(input: GenerateInput) -> str:
 
     try:
         response = client.chat.completions.create(
-            model="llama-3.3-70b-instruct",
+            model="llama-3.2-3b-instruct",
             messages=[
                 {
                     "role": "user",

production_demo/src/workflows/child.py

Lines changed: 3 additions & 3 deletions

@@ -8,7 +8,7 @@
 from src.functions.evaluate import llm_evaluate, EvaluateInput
 
 class ChildWorkflowInput(BaseModel):
-    name: str = Field(default='John Doe')
+    prompt: str = Field(default="Generate a random joke in max 20 words.")
 
 @workflow.defn()
 class ChildWorkflow:
@@ -21,7 +21,7 @@ async def run(self, input: ChildWorkflowInput):
 
         generated_text = await workflow.step(
            llm_generate,
-            GenerateInput(prompt="Generate a random joke in max 20 words."),
+            GenerateInput(prompt=input.prompt),
            task_queue="llm",
            start_to_close_timeout=timedelta(minutes=2)
         )
@@ -30,7 +30,7 @@ async def run(self, input: ChildWorkflowInput):
            llm_evaluate,
            EvaluateInput(generated_text=generated_text),
            task_queue="llm",
-            start_to_close_timeout=timedelta(minutes=2)
+            start_to_close_timeout=timedelta(minutes=5)
         )
 
         return {
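The switch from a name field to a prompt field keeps the old behavior as a pydantic default while letting callers supply their own prompt. A quick illustration of the semantics:

from pydantic import BaseModel, Field


class ChildWorkflowInput(BaseModel):
    prompt: str = Field(default="Generate a random joke in max 20 words.")


print(ChildWorkflowInput().prompt)  # falls back to the default joke prompt
print(ChildWorkflowInput(prompt="Tell me a fun fact.").prompt)  # caller override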
