Skip to content

Commit 58084f1

Browse files
Merge pull request #186 from nishio/patch-1
fix(translate): Correctly handle string returns from LLM
2 parents 661a209 + d842bef commit 58084f1

File tree

1 file changed

+7
-2
lines changed

1 file changed

+7
-2
lines changed

scatter/pipeline/steps/extraction.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,8 +47,13 @@ def extract_arguments(input, prompt, model, retries=3):
4747
llm = ChatOpenAI(model_name=model, temperature=0.0)
4848
response = llm(messages=messages(prompt, input)).content.strip()
4949
try:
50-
parsed = [a.strip() for a in json.loads(response)]
51-
return parsed
50+
obj = json.loads(response)
51+
# The LLM sometimes returns a bare JSON string instead of a list
52+
if isinstance(obj, str):
53+
obj = [obj]
54+
items = [a.strip() for a in obj]
55+
items = filter(None, items) # omit empty strings
56+
return items
5257
except json.decoder.JSONDecodeError as e:
5358
print("JSON error:", e)
5459
print("Input was:", input)

0 commit comments

Comments
 (0)