@@ -53,7 +53,7 @@ async def get_next_action(model, messages, objective, session_id):
     if model == "claude-3":
         operation = await call_claude_3_with_ocr(messages, objective, model)
         return operation, None
-    operation = call_ollama_llava(model, messages)
+    operation = call_ollama(model, messages)
     return operation, None
 
 def call_gpt_4o(messages):
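For orientation, the renamed call sits on the fall-through branch of get_next_action: a model string that no earlier branch claims is handed to the Ollama path. A minimal usage sketch, assuming the function lives in operate.models.apis and that "llava" is a model tag served by a local Ollama instance (both are assumptions, not stated in this diff):

```python
import asyncio

# Assumed module path; adjust to the actual repository layout.
from operate.models.apis import get_next_action


async def main():
    # "llava" is a placeholder Ollama model tag; the diff does not pin one.
    operation, session_id = await get_next_action(
        model="llava",
        messages=[],
        objective="Open a text editor",
        session_id=None,
    )
    print(operation)


asyncio.run(main())
```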
@@ -554,9 +554,9 @@ async def call_gpt_4o_labeled(messages, objective, model):
         traceback.print_exc()
         return call_gpt_4o(messages)
 
-def call_ollama_llava(model, messages):
+def call_ollama(model, messages):
     if config.verbose:
-        print(f"[call_ollama_llava] model {model}")
+        print(f"[call_ollama] model {model}")
     time.sleep(1)
     try:
         model = config.initialize_ollama()
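The diff only touches the entry of the function; the code that actually talks to Ollama is outside these hunks. As a rough sketch of what a generic call_ollama might do with the client returned by config.initialize_ollama(), assuming that helper returns an ollama.Client and that the prompt and screenshot handling mirrors the other providers (assumptions on my part, not taken from the patch):

```python
import ollama


def call_ollama_sketch(model_name, user_prompt, screenshot_path):
    # Hypothetical stand-in for config.initialize_ollama(): a plain local client.
    client = ollama.Client(host="http://localhost:11434")

    # Ollama's chat API accepts image paths on multimodal models such as llava,
    # so the screenshot can ride along with the text prompt.
    response = client.chat(
        model=model_name,
        messages=[
            {
                "role": "user",
                "content": user_prompt,
                "images": [screenshot_path],
            }
        ],
    )
    return response["message"]["content"]
```

The point of the rename is visible here: nothing in this path is LLaVA-specific, so call_ollama describes it better than call_ollama_llava.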
@@ -575,7 +575,7 @@ def call_ollama_llava(model, messages):
 
         if config.verbose:
             print(
-                "[call_ollama_llava] user_prompt",
+                "[call_ollama] user_prompt",
                 user_prompt,
             )
 
@@ -603,7 +603,7 @@ def call_ollama_llava(model, messages):
         assistant_message = {"role": "assistant", "content": content}
         if config.verbose:
             print(
-                "[call_ollama_llava] content",
+                "[call_ollama] content",
                 content,
             )
         content = json.loads(content)
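The context lines show that the raw model output is logged and then fed straight to json.loads, so the model is expected to reply with bare JSON. A small defensive variant, illustrative only (local models often wrap JSON in Markdown fences; the repository may already clean this up elsewhere):

```python
import json


def parse_operations(content):
    # Strip a surrounding ```json ... ``` fence before parsing, if present.
    cleaned = content.strip()
    if cleaned.startswith("```"):
        cleaned = cleaned.strip("`").strip()
        if cleaned.startswith("json"):
            cleaned = cleaned[len("json"):]
    return json.loads(cleaned)
```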
@@ -629,7 +629,7 @@ def call_ollama_llava(model, messages):
         )
         if config.verbose:
             traceback.print_exc()
-        return call_ollama_llava(model, messages)
+        return call_ollama(model, messages)
 
 
 async def call_claude_3_with_ocr(messages, objective, model):
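One behavioural note: the except handler retries by calling call_ollama recursively, so a persistently failing Ollama server keeps recursing; the patch only renames the call and leaves that pattern alone. A hypothetical bounded variant of the same idea, purely for illustration:

```python
def call_model_with_retries(fn, model, messages, max_attempts=3):
    # Illustrative only: retry a fixed number of times instead of recursing
    # from inside the callee's own exception handler.
    last_error = None
    for _ in range(max_attempts):
        try:
            return fn(model, messages)
        except Exception as error:  # broad on purpose, mirroring the handler in the diff
            last_error = error
    raise last_error
```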