From 1214dcea69f5cd44a0093772838383de7b99afdd Mon Sep 17 00:00:00 2001
From: = Enea_Gore
Date: Mon, 27 Jan 2025 20:10:13 +0100
Subject: [PATCH] lint

---
 .../divide_and_conquer/__init__.py            |  1 -
 .../generate_suggestions.py                   | 41 ++++++++++++-------
 .../prompt_generate_suggestions.py            |  2 +-
 3 files changed, 28 insertions(+), 16 deletions(-)

diff --git a/modules/text/module_text_llm/module_text_llm/divide_and_conquer/__init__.py b/modules/text/module_text_llm/module_text_llm/divide_and_conquer/__init__.py
index b75f453f..2ea42c33 100644
--- a/modules/text/module_text_llm/module_text_llm/divide_and_conquer/__init__.py
+++ b/modules/text/module_text_llm/module_text_llm/divide_and_conquer/__init__.py
@@ -1,5 +1,4 @@
 from module_text_llm.approach_config import ApproachConfig
-from pydantic import Field
 from typing import Literal
 from athena.text import Exercise, Submission
 from module_text_llm.divide_and_conquer.generate_suggestions import generate_suggestions
diff --git a/modules/text/module_text_llm/module_text_llm/divide_and_conquer/generate_suggestions.py b/modules/text/module_text_llm/module_text_llm/divide_and_conquer/generate_suggestions.py
index 5a85349c..5c3160a0 100644
--- a/modules/text/module_text_llm/module_text_llm/divide_and_conquer/generate_suggestions.py
+++ b/modules/text/module_text_llm/module_text_llm/divide_and_conquer/generate_suggestions.py
@@ -23,48 +23,61 @@ async def generate_suggestions(exercise: Exercise, submission: Submission, confi
         for grading_instruction in criterion.structured_grading_instructions
     )
     tasks = []
+
     for idx, criteria in enumerate(grading_criteria):
-        if(criteria.title == "Plagiarism" or criteria.title == "plagiarism"): # Exclude plagarism because the model cannot know and it hallucinates
+        processing_inputs = {
+            "model": model,
+            "prompt_input": prompt_input,
+            "exercise": exercise,
+            "submission": submission,
+            "grading_instruction_ids": grading_instruction_ids,
+            "is_graded": is_graded,
+            "criteria_title": criteria.title
+        }
+        if("plagiarism" in criteria.title.lower()): # Exclude plagarism because the model cannot know and it hallucinates
             continue
         usage_count, system_prompt = get_system_prompt(idx,exercise, criteria)
         if(usage_count == 1):
            chat_prompt = get_chat_prompt_with_formatting_instructions(model = model, system_message = system_prompt,human_message = get_human_message(),pydantic_object = FeedbackModel)
-           tasks.append(process_criteria(FeedbackModel, model, chat_prompt, prompt_input, exercise, submission, grading_instruction_ids, is_graded,criteria.title))
+           processing_inputs["pydantic_object"] = FeedbackModel
+           processing_inputs["chat_prompt"] = chat_prompt
         else:
            chat_prompt = get_chat_prompt_with_formatting_instructions(model = model, system_message = system_prompt,human_message= get_human_message(),pydantic_object = AssessmentModel)
-           tasks.append(process_criteria(AssessmentModel, model, chat_prompt, prompt_input, exercise, submission, grading_instruction_ids, is_graded,criteria.title))
+           processing_inputs["pydantic_object"] = AssessmentModel
+           processing_inputs["chat_prompt"] = chat_prompt
+        tasks.append(process_criteria(processing_inputs))
 
     results = await asyncio.gather(*tasks)
 
     # Flatten the list of feedbacks
     for feedback_list in results:
         feedbacks += feedback_list
-    print(feedbacks)
     return feedbacks
 
 
-async def process_criteria(pydantic_object, model, chat_prompt, prompt_input, exercise, submission, grading_instruction_ids, is_graded,criteria_title):
+async def process_criteria(processing_inputs):
+    # Call the predict_and_parse method
     result = await predict_and_parse(
-        model=model,
-        chat_prompt=chat_prompt,
-        prompt_input=prompt_input,
-        pydantic_object=pydantic_object,
+        model=processing_inputs["model"],
+        chat_prompt=processing_inputs["chat_prompt"],
+        prompt_input=processing_inputs["prompt_input"],
+        pydantic_object=processing_inputs["pydantic_object"],
         tags=[
-            f"exercise-{exercise.id}",
-            f"submission-{submission.id}",
+            f"exercise-{processing_inputs['exercise'].id}",
+            f"submission-{processing_inputs['submission'].id}",
         ],
         use_function_calling=True
     )
 
-    if pydantic_object is AssessmentModel:
+    if processing_inputs["pydantic_object"] is AssessmentModel:
         try:
-            return parse_assessment_result(result, exercise, submission, grading_instruction_ids, is_graded,criteria_title)
+            return parse_assessment_result(result, processing_inputs['exercise'], processing_inputs['submission'], processing_inputs["grading_instruction_ids"], processing_inputs["is_graded"],processing_inputs["criteria_title"])
         except Exception as e:
             logger.info("Failed to parse assessment result")
             return []
     else:
         try:
-            return parse_feedback_result(result, exercise, submission, grading_instruction_ids, is_graded,criteria_title)
+            return parse_feedback_result(result, processing_inputs['exercise'], processing_inputs['submission'], processing_inputs["grading_instruction_ids"], processing_inputs["is_graded"],processing_inputs["criteria_title"])
         except Exception as e:
             logger.info("Failed to parse feedback result")
             return []
diff --git a/modules/text/module_text_llm/module_text_llm/divide_and_conquer/prompt_generate_suggestions.py b/modules/text/module_text_llm/module_text_llm/divide_and_conquer/prompt_generate_suggestions.py
index 682fbdbb..ab924e14 100644
--- a/modules/text/module_text_llm/module_text_llm/divide_and_conquer/prompt_generate_suggestions.py
+++ b/modules/text/module_text_llm/module_text_llm/divide_and_conquer/prompt_generate_suggestions.py
@@ -17,7 +17,7 @@ def double_curly_braces(input_str):
 
 # Prompts are generated at run time.
 def get_system_prompt(index,exericse,cirteria:GradingCriterion):
-    system_prompt = f"""You are an AI Assistant TUTOR at a prestigious university tasked with assessing text submissions. You are tasked with assessing a submission from a student. The problem statement is:"""
+    system_prompt = """You are an AI Assistant TUTOR at a prestigious university tasked with assessing text submissions. You are tasked with assessing a submission from a student. The problem statement is:"""
     usage_count, formatted_criterion = format_divide_and_conquer_criteria(index,exericse,cirteria)
     return usage_count, system_prompt + formatted_criterion
 