
Commit 46a8a76

Merge pull request #130 from restackio/fixProdExample
Production demo fix inputs
2 parents 0fd391c + afd6868

File tree

7 files changed (+16 -18 lines changed)

7 files changed

+16
-18
lines changed

production_demo/README.md (+2 -2)

```diff
@@ -63,11 +63,11 @@ And for each child workflow, for each step you can see how long the function sta
 - Python 3.10 or higher
 - Poetry (for dependency management)
 - Docker (for running the Restack services)
-- Local LLM provider (we use LMStudio and a Meta Llama 3.1 8B Instruct 4bit model in this example)
+- Local LLM provider (we use LMStudio and a Meta Llama 3.2 3B Instruct 4bit model in this example)
 
 ## Start LM stduio for local LLM provider
 
-Start local server with Meta Llama 3.1 8B Instruct 4bit model
+Start local server with an open source model like llama-3.2-3b-instruct
 
 https://lmstudio.ai
 
```

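Before scheduling anything, it helps to confirm the local server is actually reachable. A minimal sketch, assuming LM Studio is serving its OpenAI-compatible API on the default port 1234 on localhost (not part of the commit; adjust host and port to your setup):

```python
# Connectivity check against a local LM Studio server (assumed address).
from openai import OpenAI

client = OpenAI(base_url="http://localhost:1234/v1/", api_key="llmstudio")

# LM Studio speaks the OpenAI API, so listing models confirms the server
# is up and shows the exact model id to reference in requests.
for model in client.models.list():
    print(model.id)
```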
production_demo/schedule_workflow.py (+1 -1)

```diff
@@ -6,7 +6,7 @@ async def main():
 
     client = Restack()
 
-    workflow_id = f"{int(time.time() * 1000)}-ExampleWorkflow"
+    workflow_id = f"{int(time.time() * 1000)}-ChildWorkflow"
     run_id = await client.schedule_workflow(
         workflow_name="ChildWorkflow",
         workflow_id=workflow_id
```

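The only change is the workflow id prefix, which previously said ExampleWorkflow even though the script schedules ChildWorkflow. For context, a sketch of the surrounding script, assuming the usual asyncio entrypoint and Restack client import (only the diffed lines are confirmed by the commit):

```python
import asyncio
import time

from restack_ai import Restack  # assumed import path for the Restack client

async def main():
    client = Restack()

    # Millisecond timestamp keeps ids unique; the suffix now matches the
    # workflow actually scheduled below.
    workflow_id = f"{int(time.time() * 1000)}-ChildWorkflow"
    run_id = await client.schedule_workflow(
        workflow_name="ChildWorkflow",
        workflow_id=workflow_id,
    )
    print(f"Scheduled run: {run_id}")

if __name__ == "__main__":
    asyncio.run(main())
```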
production_demo/src/functions/evaluate.py (+1 -1)

```diff
@@ -8,7 +8,7 @@ class EvaluateInput(BaseModel):
 @function.defn()
 async def llm_evaluate(input: EvaluateInput) -> str:
     try:
-        client = OpenAI(base_url="http://192.168.4.142:1234/v1/",api_key="llmstudio")
+        client = OpenAI(base_url="http://192.168.205.1:1234/v1/",api_key="llmstudio")
     except Exception as e:
         log.error(f"Failed to create LLM client {e}")
         raise FunctionFailure(f"Failed to create OpenAI client {e}", non_retryable=True) from e
```

production_demo/src/functions/generate.py (+1 -1)

```diff
@@ -10,7 +10,7 @@ class GenerateInput(BaseModel):
 async def llm_generate(input: GenerateInput) -> str:
 
     try:
-        client = OpenAI(base_url="http://192.168.4.142:1234/v1/",api_key="llmstudio")
+        client = OpenAI(base_url="http://192.168.205.1:1234/v1/",api_key="llmstudio")
     except Exception as e:
         log.error(f"Failed to create LLM client {e}")
         raise FunctionFailure(f"Failed to create OpenAI client {e}", non_retryable=True) from e
```

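Both functions get the same one-line fix: the hardcoded LAN address of the LM Studio server changes. Since LM Studio exposes an OpenAI-compatible endpoint, the stock OpenAI client works as-is and the api_key value is arbitrary for a local server. A sketch of how the client is presumably used in the body of llm_generate, with the model id taken from the README change and the message shape assumed:

```python
from openai import OpenAI
from pydantic import BaseModel

class GenerateInput(BaseModel):
    prompt: str  # assumed field, matching GenerateInput(prompt=...) usage

def generate_sketch(input: GenerateInput) -> str:
    # Same construction as the diff; the host is whatever machine runs LM Studio.
    client = OpenAI(base_url="http://192.168.205.1:1234/v1/", api_key="llmstudio")
    response = client.chat.completions.create(
        model="llama-3.2-3b-instruct",  # model id from the README above
        messages=[{"role": "user", "content": input.prompt}],
    )
    return response.choices[0].message.content
```

Hardcoding a LAN IP is fragile; the address has to be updated whenever the host machine changes, which is exactly what this commit does.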
production_demo/src/workflows/child.py (+6 -6)

```diff
@@ -4,11 +4,11 @@
 
 with import_functions():
     from src.functions.function import example_function
-    from src.functions.generate import llm_generate
-    from src.functions.evaluate import llm_evaluate
+    from src.functions.generate import llm_generate, GenerateInput
+    from src.functions.evaluate import llm_evaluate, EvaluateInput
 
 class ChildWorkflowInput(BaseModel):
-    name: str = Field(default='John Doe')
+    prompt: str = Field(default="Generate a random joke in max 20 words.")
 
 @workflow.defn()
 class ChildWorkflow:
@@ -21,16 +21,16 @@ async def run(self, input: ChildWorkflowInput):
 
         generated_text = await workflow.step(
             llm_generate,
-            "Generate a random joke in max 20 words.",
+            GenerateInput(prompt=input.prompt),
             task_queue="llm",
             start_to_close_timeout=timedelta(minutes=2)
         )
 
         evaluation = await workflow.step(
             llm_evaluate,
-            generated_text,
+            EvaluateInput(generated_text=generated_text),
             task_queue="llm",
-            start_to_close_timeout=timedelta(minutes=2)
+            start_to_close_timeout=timedelta(minutes=5)
         )
 
         return {
```

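Two things change here: the steps now receive typed Pydantic models instead of bare strings (hence the extended imports), and the evaluation step's timeout grows from two to five minutes. The models' shape can be inferred from the calls above; a sketch, with the field types assumed to be plain strings:

```python
from pydantic import BaseModel

# Inferred from GenerateInput(prompt=...) and EvaluateInput(generated_text=...)
# in the workflow steps above; the actual definitions live in src/functions/.
class GenerateInput(BaseModel):
    prompt: str

class EvaluateInput(BaseModel):
    generated_text: str
```

Passing a validated model rather than a raw string gives each step a self-describing payload, which is also why workflow.py below gains the same GenerateInput import.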
production_demo/src/workflows/workflow.py (+2 -2)

```diff
@@ -5,7 +5,7 @@
 from .child import ChildWorkflow, ChildWorkflowInput
 
 with import_functions():
-    from src.functions.generate import llm_generate
+    from src.functions.generate import llm_generate, GenerateInput
 
 class ExampleWorkflowInput(BaseModel):
     amount: int = Field(default=50)
@@ -35,7 +35,7 @@ async def run(self, input: ExampleWorkflowInput):
 
         generated_text = await workflow.step(
             llm_generate,
-            f"Give me the top 3 unique jokes according to the results. {results}",
+            GenerateInput(prompt=f"Give me the top 3 unique jokes according to the results. {results}"),
             task_queue="llm",
             start_to_close_timeout=timedelta(minutes=2)
         )
```

quickstart/README.md (+3 -5)

````diff
@@ -16,15 +16,13 @@ To start the Restack, use the following Docker command:
 docker run -d --pull always --name restack -p 5233:5233 -p 6233:6233 -p 7233:7233 ghcr.io/restackio/restack:main
 ```
 
-## Install dependencies and start services
+## Start python shell
 
 ```bash
-poetry env use 3.10
+poetry env use 3.10 && poetry shell
 ```
 
-```bash
-poetry shell
-```
+## Install dependencies
 
 ```bash
 poetry install
````
