diff --git a/athena/athena/__init__.py b/athena/athena/__init__.py
index f7eee0079..68930d2f1 100644
--- a/athena/athena/__init__.py
+++ b/athena/athena/__init__.py
@@ -7,7 +7,7 @@
 from .schemas import ExerciseType, GradingCriterion, StructuredGradingInstruction, StructuredGradingCriterion
 from .metadata import emit_meta, get_meta
 from .experiment import get_experiment_environment
-from .endpoints import submission_selector, submissions_consumer, feedback_consumer, feedback_provider, config_schema_provider, evaluation_provider # type: ignore
+from .endpoints import submission_selector, submissions_consumer, feedback_feeder, feedback_consumer, feedback_provider, config_schema_provider, evaluation_provider # type: ignore
 
 @app.get("/")
 def module_health():
@@ -29,6 +29,7 @@ def run_module():
     "submissions_consumer",
     "feedback_consumer",
     "feedback_provider",
+    "feedback_feeder",
     "config_schema_provider",
     "evaluation_provider",
     "emit_meta",
diff --git a/athena/athena/endpoints.py b/athena/athena/endpoints.py
index 913df1a3b..e6783b0bf 100644
--- a/athena/athena/endpoints.py
+++ b/athena/athena/endpoints.py
@@ -242,7 +242,7 @@ def feedback_consumer(func: Union[
     @authenticated
     @with_meta
     async def wrapper(
-            background_tasks: BackgroundTasks,
+            # background_tasks: BackgroundTasks,
             exercise: exercise_type,
             submission: submission_type,
             feedbacks: List[feedback_type],
@@ -267,12 +267,96 @@ async def wrapper(
             kwargs["module_config"] = module_config
 
         # Call the actual consumer asynchronously
-        background_tasks.add_task(func, exercise, submission, feedbacks, **kwargs)
+        # background_tasks.add_task(func, exercise, submission, feedbacks, **kwargs)
 
         return None
     return wrapper
 
 
+def feedback_feeder(func: Union[
+    Callable[[E, S, List[F]], None],
+    Callable[[E, S, List[F]], Coroutine[Any, Any, None]],
+    Callable[[E, S, List[F], C], None],
+    Callable[[E, S, List[F], C], Coroutine[Any, Any, None]],
+    Callable[[E, S, List[F], G, C], Coroutine[Any, Any, None]],
+    Callable[[E, S, List[F], G, C], None],
+    Callable[[E, S, List[F], G], None],
+    Callable[[E, S, List[F], G], Coroutine[Any, Any, None]]
+]):
+    """
+    Receive feedback from the Assessment Module Manager.
+    Like the feedback consumer, the feedback feeder is called whenever the LMS gets feedback from a tutor, but the decorated function runs inline and its result is returned to the caller instead of being scheduled as a background task.
+
+    This decorator can be used with several types of functions: synchronous or asynchronous, with or without a module config.
+
+    Examples:
+        Below are some examples of possible functions that you can decorate with this decorator:
+
+        Without using module config (both synchronous and asynchronous forms):
+        >>> @feedback_feeder
+        ... def sync_process_feedback(exercise: Exercise, submission: Submission, feedbacks: List[Feedback]):
+        ...     # process feedback here
+
+        >>> @feedback_feeder
+        ... async def async_process_feedback(exercise: Exercise, submission: Submission, feedbacks: List[Feedback]):
+        ...     # process feedback here
+
+        With using module config (both synchronous and asynchronous forms):
+        >>> @feedback_feeder
+        ... def sync_process_feedback_with_config(exercise: Exercise, submission: Submission, feedbacks: List[Feedback], module_config: Optional[dict]):
+        ...     # process feedback here using module_config
+
+        >>> @feedback_feeder
+        ... async def async_process_feedback_with_config(exercise: Exercise, submission: Submission, feedbacks: List[Feedback], module_config: Optional[dict]):
+        ...     # process feedback here using module_config
+    """
+    exercise_type = inspect.signature(func).parameters["exercise"].annotation
+    submission_type = inspect.signature(func).parameters["submission"].annotation
+    feedback_type = inspect.signature(func).parameters["feedbacks"].annotation.__args__[0]
+    module_config_type = inspect.signature(func).parameters["module_config"].annotation if "module_config" in inspect.signature(func).parameters else None
+    use_for_continuous_learning_type = inspect.signature(func).parameters["use_for_continuous_learning"].annotation if "use_for_continuous_learning" in inspect.signature(func).parameters else None
+
+    @app.post("/feed_feedbacks", responses=module_responses)
+    @authenticated
+    @with_meta
+    async def wrapper(
+            # background_tasks: BackgroundTasks,
+            exercise: exercise_type,
+            submission: submission_type,
+            feedbacks: List[feedback_type],
+            useForContinuousLearning: use_for_continuous_learning_type = Body(default=False, alias="useForContinuousLearning"),
+            module_config: module_config_type = Depends(get_dynamic_module_config_factory(module_config_type))):
+
+        # Retrieve existing metadata for the exercise, submission and feedback
+        exercise.meta.update(get_stored_exercise_meta(exercise) or {})
+        store_exercise(exercise)
+        submission.meta.update(get_stored_submission_meta(submission) or {})
+        store_submissions([submission])
+        for feedback in feedbacks:
+            feedback.meta.update(get_stored_feedback_meta(feedback) or {})
+            # Change the ID of the LMS to an internal ID
+            feedback.id = store_feedback(feedback, is_lms_id=True).id
+
+        kwargs = {}
+        if "use_for_continuous_learning" in inspect.signature(func).parameters:
+            kwargs["use_for_continuous_learning"] = useForContinuousLearning
+            logger.info("use_for_continuous_learning found in the function signature")
+        if "module_config" in inspect.signature(func).parameters:
+            kwargs["module_config"] = module_config
+
+        if inspect.iscoroutinefunction(func):
+            result = await func(exercise, submission, feedbacks, **kwargs)
+        else:
+            result = func(exercise, submission, feedbacks, **kwargs)
+
+        # Store feedback suggestions and assign internal IDs
+        # feedbacks = store_feedback_suggestions(feedbacks)
+        return result
+        # Call the actual consumer asynchronously
+        # background_tasks.add_task(func, exercise, submission, feedbacks, **kwargs)
+
+        # return None
+    return wrapper
+
 def feedback_provider(func: Union[
     Callable[[E, S], List[F]],
     Callable[[E, S], Coroutine[Any, Any, List[F]]],
diff --git a/modules/text/module_text_llm/internal_grading_instructions.json b/modules/text/module_text_llm/internal_grading_instructions.json
index 2d71303e6..5ca52920b 100644
--- a/modules/text/module_text_llm/internal_grading_instructions.json
+++ b/modules/text/module_text_llm/internal_grading_instructions.json
@@ -22,7 +22,43 @@
         "description": "The student submitted a complete response that meets the requirements for evaluation.",
         "credits": 3.0,
         "grading_instruction_id": 3,
-        "feedback_history": []
+        "feedback_history": [
+          {
+            "ai_referenced_text": "when we look at a well made computer program, we normally see multiple, in is self, densely connected modules that each provide a core functionality to the program.",
+            "tutor_referenced_text": "when we look at a well made computer program, we normally see multiple, in is self, densely connected modules that each provide a core functionality to the program.",
+            "ai_feedback": "There are several grammatical and spelling errors throughout your submission. For example, 'modures' should be 'modules', 'refered' should be 'referred', and 'maintanance' should be 'maintenance'. Additionally, 'in is self' should be 'in itself'. Please proofread your work to correct these errors.",
+            "tutor_feedback": "You correctly explained why coupling and cohesion are important, well done!",
+            "consistent": false
+          },
+          {
+            "ai_referenced_text": "The dense connections within one module are refered to as cohesion.",
+            "tutor_referenced_text": "The dense connections within one module are refered to as cohesion.",
+            "ai_feedback": "Your explanation of cohesion and coupling is generally accurate, but it could benefit from more precise definitions. For example, you might want to elaborate on how cohesion and coupling impact software design and maintenance.",
+            "tutor_feedback": "You correctly explained the difference between coupling and cohesion, well done!",
+            "consistent": false
+          },
+          {
+            "ai_referenced_text": "As an example we could look at a banking software.",
+            "tutor_referenced_text": "As an example we could look at a banking software.",
+            "ai_feedback": "The example of banking software is relevant, but it would be more effective if you explicitly linked it back to the concepts of cohesion and coupling. Explain how the example illustrates these principles in practice.",
+            "tutor_feedback": "Great example, well done!",
+            "consistent": false
+          },
+          {
+            "ai_referenced_text": "each instance is highly connected in it self ( or is made up of an other layer of modules for that matter) and has loose connections inbetween those instances.",
+            "tutor_referenced_text": null,
+            "ai_feedback": "Some sentences could be clearer and more concise. For instance, sentence 10 is quite long and could be broken into two sentences for better readability. Consider simplifying complex sentences to enhance clarity.",
+            "tutor_feedback": null,
+            "consistent": false
+          },
+          {
+            "ai_referenced_text": "probably even via a specific interface to eliminate having to change more than one instance at a time.",
+            "tutor_referenced_text": null,
+            "ai_feedback": "The overall structure of your submission is logical, but the flow between ideas could be improved. Consider using transitional phrases to connect your thoughts more smoothly, especially between the definitions of cohesion and coupling and the example of banking software.",
+            "tutor_feedback": null,
+            "consistent": false
+          }
+        ]
       }
     ]
   }
diff --git a/modules/text/module_text_llm/module_text_llm/__main__.py b/modules/text/module_text_llm/module_text_llm/__main__.py
index cc5977add..a816ea381 100644
--- a/modules/text/module_text_llm/module_text_llm/__main__.py
+++ b/modules/text/module_text_llm/module_text_llm/__main__.py
@@ -3,7 +3,7 @@
 import nltk
 import tiktoken
 
-from athena import app, submission_selector, submissions_consumer, feedback_consumer, feedback_provider, evaluation_provider
+from athena import app, submission_selector, submissions_consumer, feedback_consumer, feedback_provider, evaluation_provider, feedback_feeder
 from athena.text import Exercise, Submission, Feedback
 from athena.logger import logger
 
@@ -27,10 +27,19 @@ async def process_incoming_feedback(exercise: Exercise, submission: Submission,
     logger.info("process_feedback: Received %d feedbacks for submission %d of exercise %d. Approach: %s", len(feedbacks), submission.id, exercise.id,module_config.approach.__class__.__name__)
Approach: %s", len(feedbacks), submission.id, exercise.id,module_config.approach.__class__.__name__) logger.info("useForContinuousLearning: %s", use_for_continuous_learning) # logger.info("Recieved feedbacks: %s", feedbacks) - if use_for_continuous_learning: - updated_SGI = await update_grading_instructions(exercise, feedbacks,module_config.approach, submission) - logger.info("Updated grading instructions: %s", updated_SGI) - return updated_SGI + # if use_for_continuous_learning: + # updated_SGI = await update_grading_instructions(exercise, feedbacks,module_config.approach, submission) + # logger.info("Updated grading instructions: %s", updated_SGI) + # return updated_SGI + +@feedback_feeder +async def feed_feedback(exercise: Exercise, submission: Submission, feedbacks: List[Feedback], use_for_continuous_learning: bool, module_config: Configuration): + logger.info("process_feedback: Received %d feedbacks for submission %d of exercise %d. Approach: %s", len(feedbacks), submission.id, exercise.id,module_config.approach.__class__.__name__) + logger.info("useForContinuousLearning: %s", use_for_continuous_learning) + # logger.info("Recieved feedbacks: %s", feedbacks) + return await update_grading_instructions(exercise, feedbacks,module_config.approach, submission) + + @feedback_provider async def suggest_feedback(exercise: Exercise, submission: Submission, is_graded: bool, module_config: Configuration) -> List[Feedback]: logger.info("suggest_feedback: %s suggestions for submission %d of exercise %d were requested, with approach: %s",