Skip to content

Commit 046c60b

Browse files
author
= Enea_Gore
committed
k1
1 parent be55918 commit 046c60b

File tree

2 files changed

+17
-29
lines changed

2 files changed

+17
-29
lines changed

modules/text/module_text_llm/module_text_llm/__main__.py

Lines changed: 14 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
from module_text_llm.approach_controller import generate_suggestions
1414
from module_text_llm.helpers.detect_suspicios_submission import hybrid_suspicion_score, llm_check
1515
from module_text_llm.helpers.feedback_icl.store_feedback_icl import store_feedback_icl
16-
16+
from module_text_llm.few_shot_chain_of_thought_approach import FewShotChainOfThoughtConfig
1717
#Test Demo
1818
@submissions_consumer
1919
def receive_submissions(exercise: Exercise, submissions: List[Submission]):
@@ -42,15 +42,19 @@ def process_incoming_feedback(exercise: Exercise, submission: Submission, feedba
4242
async def suggest_feedback(exercise: Exercise, submission: Submission, is_graded: bool, module_config: Configuration) -> List[Feedback]:
4343
logger.info("suggest_feedback: %s suggestions for submission %d of exercise %d were requested, with approach: %s",
4444
"Graded" if is_graded else "Non-graded", submission.id, exercise.id, module_config.approach.__class__.__name__)
45-
logger.info("suggest_feedback: %s suggestions for submission %d of exercise %d were requested",
46-
"Graded" if is_graded else "Non-graded", submission.id, exercise.id)
47-
is_sus, score = hybrid_suspicion_score(submission.text, threshold=0.8)
48-
if is_sus:
49-
logger.info("Suspicious submission detected with score %f", score)
50-
is_suspicious,suspicios_text = await llm_check(submission.text)
51-
if is_suspicious:
52-
logger.info("Suspicious submission detected by LLM with text %s", suspicios_text)
53-
return [Feedback(title="Instructors need to review this submission", description="This Submission potentially violates the content policy!", credits=-1.0, exercise_id=exercise.id, submission_id=submission.id, is_graded=is_graded)]
45+
46+
if not is_graded:
47+
is_sus, score = hybrid_suspicion_score(submission.text, threshold=0.8)
48+
if is_sus:
49+
logger.info("Suspicious submission detected with score %f", score)
50+
is_suspicious,suspicios_text = await llm_check(submission.text)
51+
if is_suspicious:
52+
logger.info("Suspicious submission detected by LLM with text %s", suspicios_text)
53+
return [Feedback(title="Instructors need to review this submission", description="This Submission potentially violates the content policy!", credits=-1.0, exercise_id=exercise.id, submission_id=submission.id, is_graded=is_graded)]
54+
module_config.approach = FewShotChainOfThoughtConfig()
55+
return await generate_suggestions(exercise, submission, module_config.approach, module_config.debug, is_graded)
56+
57+
5458
return await generate_suggestions(exercise, submission, module_config.approach, module_config.debug, is_graded)
5559

5660

modules/text/module_text_llm/module_text_llm/helpers/feedback_icl/store_feedback_icl.py

Lines changed: 3 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -21,33 +21,17 @@ def store_feedback_icl(submission: Submission, exercise: Exercise, feedbacks: Li
2121

2222
def save_embedding_with_metadata(embedding,submission, exercise_id, metadata):
2323
embeddings_file = f"embeddings_{exercise_id}.npy"
24-
full_return = []
25-
print("inside reference")
26-
print(type(metadata))
27-
print(metadata.keys())
28-
# print()
29-
# print(metadata["feedbacks"])
30-
# for data in metadata["feedbacks"]:
31-
# if "index_start" in metadata.keys() and "index_end" in metadata.keys():
32-
# print(metadata["index_start"])
33-
# print(metadata["index_end"])
34-
24+
3525
if metadata["index_start"] is not None and metadata["index_end"] is not None:
3626
reference = submission.text[metadata["index_start"]:metadata["index_end"]]
3727
metadata["text_reference"] = reference
38-
# print(reference)
39-
# full_return.append(metadata)
40-
print(full_return)
28+
4129
try:
42-
print("inside try")
4330
if os.path.exists(embeddings_file):
44-
# Load existing data
4531
existing_data = np.load(embeddings_file, allow_pickle=True).item()
46-
# Append the new embedding and metadata
4732
existing_data['embeddings'] = np.vstack((existing_data['embeddings'], embedding))
4833
existing_data['metadata'].append(metadata)
4934
else:
50-
# Create a new dictionary with embeddings and metadata
5135
existing_data = {
5236
'embeddings': np.array([embedding], dtype=np.float32),
5337
'metadata': [metadata]
@@ -78,7 +62,7 @@ def save_embeddings_to_file(embeddings, filename="keyword_embeddings.npy"):
7862
print(f"Embeddings saved to {filename}")
7963

8064

81-
def query_embedding(query_embedding, exercise_id, k=5):
65+
def query_embedding(query_embedding, exercise_id, k=1):
8266
"""
8367
Query the top-k most similar embeddings to the provided query_embedding
8468
for a given exercise ID. Return the corresponding metadata for these embeddings.

0 commit comments

Comments
 (0)