|
13 | 13 | from module_text_llm.approach_controller import generate_suggestions
|
14 | 14 | from module_text_llm.helpers.detect_suspicios_submission import hybrid_suspicion_score, llm_check
|
15 | 15 | from module_text_llm.helpers.feedback_icl.store_feedback_icl import store_feedback_icl
|
16 |
| - |
| 16 | +from module_text_llm.few_shot_chain_of_thought_approach import FewShotChainOfThoughtConfig |
17 | 17 | #Test Demo
|
18 | 18 | @submissions_consumer
|
19 | 19 | def receive_submissions(exercise: Exercise, submissions: List[Submission]):
|
@@ -42,15 +42,19 @@ def process_incoming_feedback(exercise: Exercise, submission: Submission, feedba
|
42 | 42 | async def suggest_feedback(exercise: Exercise, submission: Submission, is_graded: bool, module_config: Configuration) -> List[Feedback]:
|
43 | 43 | logger.info("suggest_feedback: %s suggestions for submission %d of exercise %d were requested, with approach: %s",
|
44 | 44 | "Graded" if is_graded else "Non-graded", submission.id, exercise.id, module_config.approach.__class__.__name__)
|
45 |
| - logger.info("suggest_feedback: %s suggestions for submission %d of exercise %d were requested", |
46 |
| - "Graded" if is_graded else "Non-graded", submission.id, exercise.id) |
47 |
| - is_sus, score = hybrid_suspicion_score(submission.text, threshold=0.8) |
48 |
| - if is_sus: |
49 |
| - logger.info("Suspicious submission detected with score %f", score) |
50 |
| - is_suspicious,suspicios_text = await llm_check(submission.text) |
51 |
| - if is_suspicious: |
52 |
| - logger.info("Suspicious submission detected by LLM with text %s", suspicios_text) |
53 |
| - return [Feedback(title="Instructors need to review this submission", description="This Submission potentially violates the content policy!", credits=-1.0, exercise_id=exercise.id, submission_id=submission.id, is_graded=is_graded)] |
| 45 | + |
| 46 | + if not is_graded: |
| 47 | + is_sus, score = hybrid_suspicion_score(submission.text, threshold=0.8) |
| 48 | + if is_sus: |
| 49 | + logger.info("Suspicious submission detected with score %f", score) |
| 50 | + is_suspicious,suspicios_text = await llm_check(submission.text) |
| 51 | + if is_suspicious: |
| 52 | + logger.info("Suspicious submission detected by LLM with text %s", suspicios_text) |
| 53 | + return [Feedback(title="Instructors need to review this submission", description="This Submission potentially violates the content policy!", credits=-1.0, exercise_id=exercise.id, submission_id=submission.id, is_graded=is_graded)] |
| 54 | + module_config.approach = FewShotChainOfThoughtConfig() |
| 55 | + return await generate_suggestions(exercise, submission, module_config.approach, module_config.debug, is_graded) |
| 56 | + |
| 57 | + |
54 | 58 | return await generate_suggestions(exercise, submission, module_config.approach, module_config.debug, is_graded)
|
55 | 59 |
|
56 | 60 |
|
|
0 commit comments