Commit de980b6

fix inputs
1 parent 539d5c1 commit de980b6

File tree: 5 files changed, +11 −11 lines changed

production_demo/README.md
production_demo/src/functions/evaluate.py
production_demo/src/functions/generate.py
production_demo/src/workflows/child.py
production_demo/src/workflows/workflow.py

production_demo/README.md

Lines changed: 1 addition & 1 deletion
@@ -67,7 +67,7 @@ And for each child workflow, for each step you can see how long the function sta
 
 ## Start LM stduio for local LLM provider
 
-Start local server with Meta Llama 3.1 8B Instruct 4bit model
+Start local server with an open source model like llama-3.3-70b-instruct.
 
 https://lmstudio.ai
 
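The README change swaps the pinned Meta Llama 3.1 8B model for a larger open-source one. Once LM Studio's local server is running, its OpenAI-compatible endpoint can be smoke-tested from Python; this is a minimal sketch assuming the default localhost:1234 address (the code diffs below point at a LAN IP instead), and the api_key value is a placeholder that LM Studio ignores:

# Quick connectivity check against LM Studio's OpenAI-compatible server.
# Assumes the default address http://localhost:1234/v1/; the model name
# must match whatever model is actually loaded in LM Studio.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:1234/v1/", api_key="llmstudio")
response = client.chat.completions.create(
    model="llama-3.3-70b-instruct",
    messages=[{"role": "user", "content": "Reply with the single word: pong"}],
)
print(response.choices[0].message.content)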

production_demo/src/functions/evaluate.py

Lines changed: 2 additions & 2 deletions
@@ -8,7 +8,7 @@ class EvaluateInput(BaseModel):
 @function.defn()
 async def llm_evaluate(input: EvaluateInput) -> str:
     try:
-        client = OpenAI(base_url="http://192.168.4.142:1234/v1/",api_key="llmstudio")
+        client = OpenAI(base_url="http://192.168.205.1:1234/v1/",api_key="llmstudio")
     except Exception as e:
         log.error(f"Failed to create LLM client {e}")
         raise FunctionFailure(f"Failed to create OpenAI client {e}", non_retryable=True) from e
@@ -27,7 +27,7 @@ async def llm_evaluate(input: EvaluateInput) -> str:
 
     try:
         response = client.chat.completions.create(
-            model="llama-3.2-3b-instruct",
+            model="llama-3.3-70b-instruct",
             messages=[
                 {
                     "role": "user",

production_demo/src/functions/generate.py

Lines changed: 2 additions & 2 deletions
@@ -10,14 +10,14 @@ class GenerateInput(BaseModel):
 async def llm_generate(input: GenerateInput) -> str:
 
     try:
-        client = OpenAI(base_url="http://192.168.4.142:1234/v1/",api_key="llmstudio")
+        client = OpenAI(base_url="http://192.168.205.1:1234/v1/",api_key="llmstudio")
     except Exception as e:
         log.error(f"Failed to create LLM client {e}")
         raise FunctionFailure(f"Failed to create OpenAI client {e}", non_retryable=True) from e
 
     try:
         response = client.chat.completions.create(
-            model="llama-3.2-3b-instruct",
+            model="llama-3.3-70b-instruct",
             messages=[
                 {
                     "role": "user",

production_demo/src/workflows/child.py

Lines changed: 4 additions & 4 deletions
@@ -4,8 +4,8 @@
 
 with import_functions():
     from src.functions.function import example_function
-    from src.functions.generate import llm_generate
-    from src.functions.evaluate import llm_evaluate
+    from src.functions.generate import llm_generate, GenerateInput
+    from src.functions.evaluate import llm_evaluate, EvaluateInput
 
 class ChildWorkflowInput(BaseModel):
     name: str = Field(default='John Doe')
@@ -21,14 +21,14 @@ async def run(self, input: ChildWorkflowInput):
 
         generated_text = await workflow.step(
             llm_generate,
-            "Generate a random joke in max 20 words.",
+            GenerateInput(prompt="Generate a random joke in max 20 words."),
             task_queue="llm",
             start_to_close_timeout=timedelta(minutes=2)
         )
 
         evaluation = await workflow.step(
             llm_evaluate,
-            generated_text,
+            EvaluateInput(generated_text=generated_text),
             task_queue="llm",
             start_to_close_timeout=timedelta(minutes=2)
         )
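Passing models rather than bare strings means a malformed step input fails at construction time, before the step is ever scheduled. A small self-contained sketch, assuming the field definitions inferred above:

# Pydantic rejects a bad payload up front instead of inside the function.
from pydantic import BaseModel, ValidationError

class GenerateInput(BaseModel):
    prompt: str

try:
    GenerateInput(prompt=None)  # not a str -> raises before any step runs
except ValidationError as e:
    print(e)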

production_demo/src/workflows/workflow.py

Lines changed: 2 additions & 2 deletions
@@ -5,7 +5,7 @@
 from .child import ChildWorkflow, ChildWorkflowInput
 
 with import_functions():
-    from src.functions.generate import llm_generate
+    from src.functions.generate import llm_generate, GenerateInput
 
 class ExampleWorkflowInput(BaseModel):
     amount: int = Field(default=50)
@@ -35,7 +35,7 @@ async def run(self, input: ExampleWorkflowInput):
 
         generated_text = await workflow.step(
             llm_generate,
-            f"Give me the top 3 unique jokes according to the results. {results}",
+            GenerateInput(prompt=f"Give me the top 3 unique jokes according to the results. {results}"),
             task_queue="llm",
             start_to_close_timeout=timedelta(minutes=2)
         )
