Commit 0a85a19

add other ollama visual llms
tosquash to squash w/ first
1 parent 14b579f commit 0a85a19

File tree

1 file changed: +9 -7 lines changed


operate/models/apis.py

Lines changed: 9 additions & 7 deletions
@@ -50,8 +50,8 @@ async def get_next_action(model, messages, objective, session_id):
         return "coming soon"
     if model == "gemini-pro-vision":
         return call_gemini_pro_vision(messages, objective), None
-    if model == "llava":
-        operation = call_ollama_llava(messages)
+    if model == "llava" or model == "llava:13b" or "bakllava" or "llava-llama3":
+        operation = call_ollama_llava(messages, model)
         return operation, None
     if model == "claude-3":
         operation = await call_claude_3_with_ocr(messages, objective, model)
@@ -558,9 +558,11 @@ async def call_gpt_4o_labeled(messages, objective, model):
         return call_gpt_4o(messages)
 
 
-def call_ollama_llava(messages):
+def call_ollama_llava(messages, model):
+    if model == "":
+        model = "llava"
     if config.verbose:
-        print("[call_ollama_llava]")
+        print(f"[call_ollama_llava] model {model}")
     time.sleep(1)
     try:
         model = config.initialize_ollama()
@@ -590,8 +592,8 @@ def call_ollama_llava(messages):
         }
         messages.append(vision_message)
 
-        response = model.chat(
-            model="llava",
+        response = ollama.chat(
+            model=model,
             messages=messages,
         )
 
@@ -633,7 +635,7 @@ def call_ollama_llava(messages):
         )
         if config.verbose:
             traceback.print_exc()
-        return call_ollama_llava(messages)
+        return call_ollama_llava(messages, model)
 
 
 async def call_claude_3_with_ocr(messages, objective, model):
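
Taken together, the diff routes every supported Ollama vision model through the same path: get_next_action passes the chosen model name into call_ollama_llava(messages, model), which falls back to "llava" when the name is empty and forwards it to ollama.chat instead of the previously hard-coded "llava". The sketch below illustrates that flow under stated assumptions: it uses the ollama Python package against a locally running Ollama server that already has the model pulled, and SUPPORTED_VISION_MODELS and ask_ollama_vision are illustrative names, not code from this repository. It also spells the dispatch out as a membership test, since a chained or "bakllava" or "llava-llama3" over bare string literals always evaluates truthy in Python.

# A minimal sketch, not the repository's implementation. Assumes the ollama
# Python package and a locally running Ollama server that already has the
# chosen model pulled (for example via "ollama pull llava:13b").
# SUPPORTED_VISION_MODELS and ask_ollama_vision are illustrative names.
import ollama

SUPPORTED_VISION_MODELS = ("llava", "llava:13b", "bakllava", "llava-llama3")


def ask_ollama_vision(model, prompt, screenshot_path):
    if model == "":
        # Mirrors the default added to call_ollama_llava in this commit.
        model = "llava"
    if model not in SUPPORTED_VISION_MODELS:
        # Explicit membership test; a chained `or "bakllava"` is always truthy.
        raise ValueError("unsupported Ollama vision model: " + model)

    # The images field accepts a path to the screenshot; the client encodes
    # the file before sending it to the server.
    response = ollama.chat(
        model=model,  # e.g. "bakllava" instead of a hard-coded "llava"
        messages=[
            {"role": "user", "content": prompt, "images": [screenshot_path]}
        ],
    )
    return response["message"]["content"]

Used from a dispatcher this would look like operation = ask_ollama_vision("llava:13b", prompt, "screenshot.png"); passing model=model to ollama.chat is what lets one helper serve llava, llava:13b, bakllava, and llava-llama3, so supporting another Ollama vision model only means extending the accepted names.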
