-
Notifications
You must be signed in to change notification settings - Fork 8
Expand file tree
/
Copy path: inference_standalone_openai.py
More file actions
299 lines (239 loc) · 11.5 KB
/
inference_standalone_openai.py
File metadata and controls
299 lines (239 loc) · 11.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
import os
import json
import random
import argparse
import yaml
import re
import torch
from tqdm import tqdm
import csv
from openai import OpenAI
class Evaluation:
    """Queries an OpenAI chat model and scores multiple-choice (a)-(d) answers."""

    def __init__(self, args, cmd_args):
        """Store the config and build an OpenAI client from the key on disk.

        Args:
            args: Config dict; args['models']['llm_model'] selects the model.
            cmd_args: argparse namespace with a `token_path` attribute pointing
                at a directory containing `openai_key.txt`.
        """
        self.args = args
        # Load API keys. strip() guards against a trailing newline in the key
        # file, which would otherwise break authentication.
        token_path = cmd_args.token_path
        with open(os.path.join(token_path, "openai_key.txt"), "r") as api_key_file:
            self.openai_key = api_key_file.read().strip()
        self.client = OpenAI(api_key=self.openai_key)

    def query_llm(self, question, all_options, context=None, instructions=None, verbose=False):
        """Send the question (plus optional chat history) to the model.

        Args:
            question: The user question text.
            all_options: Text listing the (a)-(d) answer options.
            context: Optional chat history (list of role/content dicts).
            instructions: Optional override of the answer-format instructions.
            verbose: If True, print the raw model response.

        Returns:
            The model's response text.
        """
        assert context is None or isinstance(context, list), "Context must be a list of dictionaries"
        if instructions is None:
            instructions = "Find the most appropriate model response and give your final answer (a), (b), (c), or (d) after the special token <final_answer>."
        user_message = {"role": "user", "content": question + '\n\n' + instructions + '\n\n' + all_options}
        messages = (context + [user_message]) if context else [user_message]
        # BUG FIX: the original condition was `'o' in llm_model`, which is true
        # for every supported model name (e.g. "gpt-4o" contains 'o'), so the
        # system-role workaround was applied unconditionally. Only o-series
        # reasoning models (o1, o1-mini, o3-mini, ...) need it; match a name
        # starting with 'o' followed by a digit.
        if re.match(r'^o\d', self.args['models']['llm_model']):
            messages = convert_role_system_to_user(messages)
        response = self.client.chat.completions.create(
            model=self.args['models']['llm_model'],
            messages=messages,
        )
        response = response.choices[0].message.content
        if verbose:
            print("model response: ", response)
        return response

    def extract_answer(self, predicted_answer, correct_answer):
        """Check whether the model's answer matches the correct option.

        Args:
            predicted_answer: Raw model response, possibly containing a
                `<final_answer>` section.
            correct_answer: The correct option, e.g. "(a)" or "a".

        Returns:
            Tuple (is_correct, cleaned_predicted_answer).
        """
        def _extract_only_options(text):
            # Prefer parenthesized options like "(a)"; fall back to bare letters.
            text = text.lower()
            in_parens = re.findall(r'\(([a-d])\)', text)
            if in_parens:
                return set(in_parens)
            return set(re.findall(r'\b([a-d])\b', text))

        correct = correct_answer.lower().strip("() ")
        # Keep the full response around for the fallback scan below.
        full_response = predicted_answer
        predicted_answer = predicted_answer.strip()
        # Isolate the text after the <final_answer> token if present.
        if "<final_answer>" in predicted_answer:
            predicted_answer = predicted_answer.split("<final_answer>")[-1].strip()
            if predicted_answer.endswith("</final_answer>"):
                predicted_answer = predicted_answer[:-len("</final_answer>")].strip()
        pred_options = _extract_only_options(predicted_answer)
        # Correct only if the extracted set is exactly the single right option.
        if pred_options == {correct}:
            return True, predicted_answer
        # Fallback: scan the whole response for the option letters.
        response_options = _extract_only_options(full_response)
        if response_options == {correct}:
            return True, predicted_answer
        return False, predicted_answer
def convert_role_system_to_user(messages_4o):
    """
    Convert OpenAI 4o-style messages (with 'system') to o-series style (no 'system').
    - System messages are merged into the next message (as a prefix).
    - Consecutive messages with the same role are merged into one.

    Args:
        messages_4o (list of dict): List of messages with roles like 'system',
            'user', 'assistant'.

    Returns:
        list of dict: Cleaned message history for models without system role support.
    """
    messages_o1 = []
    system_buffer = ""
    for msg in messages_4o:
        role = msg.get("role")
        content = msg.get("content", "")
        if role == "system":
            system_buffer += f"[System]: {content}\n"
            continue
        # Prepend any buffered system message(s) to this message.
        if system_buffer:
            content = system_buffer + content
            system_buffer = ""
        # Merge with previous message if role is the same.
        if messages_o1 and messages_o1[-1]["role"] == role:
            messages_o1[-1]["content"] += "\n" + content
        else:
            messages_o1.append({"role": role, "content": content})
    # BUG FIX: the original silently dropped system messages that were not
    # followed by any non-system message; flush them as a user message.
    if system_buffer:
        if messages_o1 and messages_o1[-1]["role"] == "user":
            messages_o1[-1]["content"] += "\n" + system_buffer
        else:
            messages_o1.append({"role": "user", "content": system_buffer})
    return messages_o1
def build_jsonl_index(jsonl_path):
    """
    Make one pass over the JSONL file and record each record's byte offset.

    Each line must be a JSON object with a single key-value pair; the returned
    dict maps that key to the file offset where its line starts.
    """
    index = {}
    with open(jsonl_path, 'r', encoding='utf-8') as f:
        offset = f.tell()
        record = f.readline()
        while record:
            key = next(iter(json.loads(record)))
            index[key] = offset
            offset = f.tell()
            record = f.readline()
    return index
def load_context_by_id(jsonl_path, offset):
    """
    Load exactly one JSONL record from a known byte offset.

    Returns the value associated with the single key in that JSON object.
    """
    with open(jsonl_path, 'r', encoding='utf-8') as handle:
        handle.seek(offset)
        record = json.loads(handle.readline())
    return next(iter(record.values()))
def load_rows(csv_path):
    """Yield each CSV data row as a plain dict, keyed by the header row."""
    with open(csv_path, mode='r', newline='', encoding='utf-8') as csvfile:
        for record in csv.DictReader(csvfile):
            yield dict(record)
def load_rows_with_context(csv_path, jsonl_path):
    """
    Yield (row_dict, context) pairs for every question row in the CSV.

    Consecutive rows sharing the same `shared_context_id` reuse the context
    loaded for the first of them instead of re-reading the JSONL file.
    """
    jsonl_index = build_jsonl_index(jsonl_path)
    cached_sid = None
    cached_context = None
    with open(csv_path, mode='r', newline='', encoding='utf-8') as csvfile:
        for record in csv.DictReader(csvfile):
            row_data = dict(record)
            sid = row_data["shared_context_id"]
            if sid != cached_sid:
                cached_context = load_context_by_id(jsonl_path, jsonl_index[sid])
                cached_sid = sid
            yield row_data, cached_context
def count_csv_rows(csv_path):
    """Count data rows in a CSV file, excluding the header row.

    Uses csv.reader so quoted fields containing embedded newlines count as a
    single row (the original raw line count overcounted them), and clamps to
    0 for an empty file instead of returning -1.
    """
    with open(csv_path, mode='r', newline='', encoding='utf-8') as f:
        return max(0, sum(1 for _ in csv.reader(f)) - 1)  # -1 for header row
def run_evaluation(args, cmd_args, llm, verbose=False):
    """Run the question-answering evaluation loop and append results to CSV.

    For each question row (paired with its shared context), queries the LLM,
    scores the answer against the correct option, and appends one result row
    to cmd_args.result_path. Per-row errors are collected and printed at the
    end rather than aborting the whole run.

    Args:
        args: Config dict (not read directly here; kept for interface symmetry).
        cmd_args: argparse namespace with question_path, context_path and
            result_path attributes.
        llm: Evaluation instance providing query_llm() and extract_answer().
        verbose: If True, print per-question details.
    """
    question_path = cmd_args.question_path
    context_path = cmd_args.context_path
    result_path = cmd_args.result_path
    # Start from a clean results file so repeated runs do not append duplicates.
    if os.path.exists(result_path):
        os.remove(result_path)
    all_errors = []
    # Row count is used only for the tqdm progress bar total.
    total_rows = count_csv_rows(question_path)
    for row_data, context in tqdm(load_rows_with_context(question_path, context_path), total=total_rows):
        try:
            # Extract relevant data from the row
            persona_id = row_data["persona_id"]
            question_id = row_data["question_id"]
            question_type = row_data["question_type"]
            topic = row_data["topic"]
            context_length_in_tokens = row_data["context_length_in_tokens"]
            context_length_in_letters = row_data["context_length_in_letters"]
            distance_to_ref_in_blocks = row_data["distance_to_ref_in_blocks"]
            distance_to_ref_in_tokens = row_data["distance_to_ref_in_tokens"]
            num_irrelevant_tokens = row_data["num_irrelevant_tokens"]
            distance_to_ref_proportion_in_context = row_data["distance_to_ref_proportion_in_context"]
            question = row_data["user_question_or_message"]
            correct_answer = row_data["correct_answer"]
            all_options = row_data["all_options"]
            shared_context_id = row_data["shared_context_id"]
            end_index_in_shared_context = row_data["end_index_in_shared_context"]
            # Prepare the context for the LLM query: truncate the shared
            # context so the model only sees history up to this question.
            context= context[:int(end_index_in_shared_context)] # Include up to the end index
            # Send the query to the LLM
            model_response = llm.query_llm(question, all_options, context)
            score, predicted_answer = llm.extract_answer(model_response, correct_answer)
            # Save the results back to a CSV file together with the question types
            if verbose:
                print(f"Question: {question}")
                print(f"Predicted Answer: {predicted_answer}")
                print(f"Correct Answer: {correct_answer}")
                print(f"Score: {score}")
            # Append-mode write: one result row per question, so partial
            # progress survives a crash mid-run.
            with open(result_path, mode='a', newline='', encoding='utf-8') as file:
                writer = csv.writer(file)
                # Write the header if the file is empty
                if os.stat(result_path).st_size == 0:
                    writer.writerow(["score", "persona_id", "question_id", "user_question_or_message", "question_type", "topic", "context_length_in_tokens", "context_length_in_letters",
                                     "distance_to_ref_in_blocks", "distance_to_ref_in_tokens", "num_irrelevant_tokens", "distance_to_ref_proportion_in_context",
                                     "model_response", "len_of_model_response", "predicted_answer", "correct_answer"])
                writer.writerow([
                    score,
                    persona_id,
                    question_id,
                    question,
                    question_type,
                    topic,
                    context_length_in_tokens,
                    context_length_in_letters,
                    distance_to_ref_in_blocks,
                    distance_to_ref_in_tokens,
                    num_irrelevant_tokens,
                    distance_to_ref_proportion_in_context,
                    model_response,
                    len(model_response),
                    predicted_answer,
                    correct_answer,
                ])
        except Exception as e:
            # Record the failure and move on to the next question.
            print(f"Error: {e}")
            all_errors.append({
                "persona_id": row_data["persona_id"],
                "question_id": row_data["question_id"],
                "error": str(e)
            })
            continue
    # Summarize all per-row failures at the end of the run.
    if all_errors:
        for error in all_errors:
            print(f"Error for persona_id {error['persona_id']} and question_id {error['question_id']}: {error['error']}")
if __name__ == "__main__":
    # Load hyperparameters from the YAML config. Everything below depends on
    # `args` being defined, so abort if the file cannot be read.
    try:
        with open('config.yaml', 'r') as file:
            args = yaml.safe_load(file)
    except Exception as e:
        # BUG FIX: the original printed a message and fell through, which
        # later produced a confusing NameError on `args`; fail fast instead.
        raise SystemExit(f'Error reading the config file: {e}')
    # Command-line argument parsing
    parser = argparse.ArgumentParser(description='Command line arguments')
    parser.add_argument('--model', type=str, default="gpt-4o", help='Set LLM model. Choose from o3-mini, o1, o1-mini, gpt-4o, gpt-4o-mini')
    parser.add_argument('--step', type=str, default='prepare', help='Step to run: prepare or evaluate')
    parser.add_argument('--token_path', type=str, default='api_tokens', help='Path to the API tokens')
    parser.add_argument('--clean', dest='clean', action='store_true', help='Remove existing csv and json files and start clean')
    parser.add_argument('--verbose', dest='verbose', action='store_true', help='Set verbose to True')
    parser.add_argument('--question_path', type=str, default='data/questions_128k.csv', help='Path to the questions CSV file')
    parser.add_argument('--context_path', type=str, default='data/shared_contexts_128k.jsonl', help='Path to the contexts JSONL file')
    parser.add_argument('--result_path', type=str, default='data/eval_results.csv', help='Path to save the results CSV file')
    cmd_args = parser.parse_args()
    # A --model flag overrides the model named in config.yaml.
    args['models']['llm_model'] = cmd_args.model if cmd_args.model is not None else args['models']['llm_model']
    llm = Evaluation(args, cmd_args)
    if cmd_args.step == 'evaluate':
        run_evaluation(args, cmd_args, llm, verbose=cmd_args.verbose)
    else:
        # Only the 'evaluate' step is implemented in this standalone script.
        raise ValueError("Invalid step. Choose 'prepare' or 'evaluate'.")