Skip to content

Commit 558adb3

Browse files
authored
Merge pull request #96 from DL4DS/cleanup
removing debug print statements and minor fix
2 parents aa1b9ca + 669108f commit 558adb3

File tree

4 files changed

+52
-115
lines changed

4 files changed

+52
-115
lines changed

apps/ai_tutor/chainlit_app.py

Lines changed: 6 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
get_sources,
1414
get_history_chat_resume,
1515
get_history_setup_llm,
16-
get_last_config,
16+
# get_last_config,
1717
)
1818
from modules.chat_processor.helpers import (
1919
update_user_info,
@@ -27,7 +27,6 @@
2727
import copy
2828
from typing import Optional
2929
from chainlit.types import ThreadDict
30-
import time
3130
import base64
3231
from langchain_community.callbacks import get_openai_callback
3332
from datetime import datetime, timezone
@@ -90,7 +89,6 @@ async def setup_llm(self):
9089
9190
#TODO: Clean this up.
9291
"""
93-
start_time = time.time()
9492

9593
llm_settings = cl.user_session.get("llm_settings", {})
9694
(
@@ -138,8 +136,6 @@ async def setup_llm(self):
138136
cl.user_session.set("chain", self.chain)
139137
cl.user_session.set("llm_tutor", self.llm_tutor)
140138

141-
print("Time taken to setup LLM: ", time.time() - start_time)
142-
143139
@no_type_check
144140
async def update_llm(self, new_settings: Dict[str, Any]):
145141
"""
@@ -222,32 +218,9 @@ async def inform_llm_settings(self):
222218
"""
223219
Inform the user about the updated LLM settings and display them as a message.
224220
"""
225-
llm_settings: Dict[str, Any] = cl.user_session.get("llm_settings", {})
226-
llm_tutor = cl.user_session.get("llm_tutor")
227-
settings_dict = {
228-
"model": llm_settings.get("chat_model"),
229-
"retriever": llm_settings.get("retriever_method"),
230-
"memory_window": llm_settings.get("memory_window"),
231-
"num_docs_in_db": (
232-
len(llm_tutor.vector_db)
233-
if llm_tutor and hasattr(llm_tutor, "vector_db")
234-
else 0
235-
),
236-
"view_sources": llm_settings.get("view_sources"),
237-
"follow_up_questions": llm_settings.get("follow_up_questions"),
238-
}
239-
print("Settings Dict: ", settings_dict)
240221
await cl.Message(
241222
author=SYSTEM,
242223
content="LLM settings have been updated. You can continue with your Query!",
243-
# elements=[
244-
# cl.Text(
245-
# name="settings",
246-
# display="side",
247-
# content=json.dumps(settings_dict, indent=4),
248-
# language="json",
249-
# ),
250-
# ],
251224
).send()
252225

253226
async def set_starters(self):
@@ -306,8 +279,6 @@ async def start(self):
306279
and display and load previous conversation if chat logging is enabled.
307280
"""
308281

309-
start_time = time.time()
310-
311282
await self.make_llm_settings_widgets(self.config) # Reload the settings widgets
312283

313284
user = cl.user_session.get("user")
@@ -335,8 +306,6 @@ async def start(self):
335306
cl.user_session.set("llm_tutor", self.llm_tutor)
336307
cl.user_session.set("chain", self.chain)
337308

338-
print("Time taken to start LLM: ", time.time() - start_time)
339-
340309
async def stream_response(self, response):
341310
"""
342311
Stream the response from the LLM.
@@ -367,8 +336,6 @@ async def main(self, message):
367336
message: The incoming chat message.
368337
"""
369338

370-
start_time = time.time()
371-
372339
chain = cl.user_session.get("chain")
373340
token_count = 0 # initialize token count
374341
if not chain:
@@ -386,8 +353,6 @@ async def main(self, message):
386353
user.metadata = updated_user.metadata
387354
cl.user_session.set("user", user)
388355

389-
print("\n\n User Tokens Left: ", user.metadata["tokens_left"])
390-
391356
# see if user has token credits left
392357
# if not, return message saying they have run out of tokens
393358
if user.metadata["tokens_left"] <= 0 and "admin" not in user.metadata["role"]:
@@ -478,12 +443,9 @@ async def main(self, message):
478443
)
479444
answer_with_sources = answer_with_sources.replace("$$", "$")
480445

481-
print("Time taken to process the message: ", time.time() - start_time)
482-
483446
actions = []
484447

485448
if self.config["llm_params"]["generate_follow_up"]:
486-
start_time = time.time()
487449
cb_follow_up = cl.AsyncLangchainCallbackHandler()
488450
config = {
489451
"callbacks": (
@@ -513,8 +475,6 @@ async def main(self, message):
513475
)
514476
)
515477

516-
print("Time taken to generate questions: ", time.time() - start_time)
517-
518478
# # update user info with token count
519479
tokens_left = await update_user_from_chainlit(user, token_count)
520480

@@ -532,21 +492,20 @@ async def main(self, message):
532492
).send()
533493

534494
async def on_chat_resume(self, thread: ThreadDict):
535-
thread_config = None
495+
# thread_config = None
536496
steps = thread["steps"]
537497
k = self.config["llm_params"][
538498
"memory_window"
539499
] # on resume, always use the default memory window
540500
conversation_list = get_history_chat_resume(steps, k, SYSTEM, LLM)
541-
thread_config = get_last_config(
542-
steps
543-
) # TODO: Returns None for now - which causes config to be reloaded with default values
501+
# thread_config = get_last_config(
502+
# steps
503+
# ) # TODO: Returns None for now - which causes config to be reloaded with default values
544504
cl.user_session.set("memory", conversation_list)
545-
await self.start(config=thread_config)
505+
await self.start()
546506

547507
@cl.header_auth_callback
548508
def header_auth_callback(headers: dict) -> Optional[cl.User]:
549-
print("\n\n\nI am here\n\n\n")
550509
# try: # TODO: Add try-except block after testing
551510
# TODO: Implement to get the user information from the headers (not the cookie)
552511
cookie = headers.get("cookie") # gets back a str
@@ -562,10 +521,6 @@ def header_auth_callback(headers: dict) -> Optional[cl.User]:
562521
).decode()
563522
decoded_user_info = json.loads(decoded_user_info)
564523

565-
print(
566-
f"\n\n USER ROLE: {decoded_user_info['literalai_info']['metadata']['role']} \n\n"
567-
)
568-
569524
return cl.User(
570525
id=decoded_user_info["literalai_info"]["id"],
571526
identifier=decoded_user_info["literalai_info"]["identifier"],

apps/ai_tutor/helpers.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -32,9 +32,6 @@ async def check_user_cooldown(
3232
cooldown_end_time = last_message_time + timedelta(seconds=COOLDOWN_TIME)
3333
cooldown_end_time_iso = cooldown_end_time.isoformat()
3434

35-
# Debug: Print the cooldown end time
36-
print(f"Cooldown end time (ISO): {cooldown_end_time_iso}")
37-
3835
# Check if the user is still in cooldown
3936
if elapsed_time_in_seconds < COOLDOWN_TIME:
4037
return True, cooldown_end_time_iso # Return in ISO 8601 format
@@ -81,10 +78,6 @@ async def reset_tokens_for_user(user_info, TOKENS_LEFT, REGEN_TIME):
8178
# Ensure the new token count does not exceed max_tokens
8279
new_token_count = min(current_tokens + tokens_to_regenerate, max_tokens)
8380

84-
print(
85-
f"\n\n Adding {tokens_to_regenerate} tokens to the user, Time elapsed: {elapsed_time_in_seconds} seconds, Tokens after regeneration: {new_token_count}, Tokens before: {current_tokens} \n\n"
86-
)
87-
8881
# Update the user's token count
8982
user_info["metadata"]["tokens_left"] = new_token_count
9083

apps/chainlit_base/chainlit_base.py

Lines changed: 0 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88
get_history_setup_llm,
99
)
1010
import copy
11-
import time
1211
from langchain_community.callbacks import get_openai_callback
1312
from config.config_manager import config_manager
1413

@@ -36,7 +35,6 @@ async def setup_llm(self):
3635
3736
#TODO: Clean this up.
3837
"""
39-
start_time = time.time()
4038

4139
llm_settings = cl.user_session.get("llm_settings", {})
4240
(
@@ -84,8 +82,6 @@ async def setup_llm(self):
8482
cl.user_session.set("chain", self.chain)
8583
cl.user_session.set("llm_tutor", self.llm_tutor)
8684

87-
print("Time taken to setup LLM: ", time.time() - start_time)
88-
8985
@no_type_check
9086
async def update_llm(self, new_settings: Dict[str, Any]):
9187
"""
@@ -168,32 +164,9 @@ async def inform_llm_settings(self):
168164
"""
169165
Inform the user about the updated LLM settings and display them as a message.
170166
"""
171-
llm_settings: Dict[str, Any] = cl.user_session.get("llm_settings", {})
172-
llm_tutor = cl.user_session.get("llm_tutor")
173-
settings_dict = {
174-
"model": llm_settings.get("chat_model"),
175-
"retriever": llm_settings.get("retriever_method"),
176-
"memory_window": llm_settings.get("memory_window"),
177-
"num_docs_in_db": (
178-
len(llm_tutor.vector_db)
179-
if llm_tutor and hasattr(llm_tutor, "vector_db")
180-
else 0
181-
),
182-
"view_sources": llm_settings.get("view_sources"),
183-
"follow_up_questions": llm_settings.get("follow_up_questions"),
184-
}
185-
print("Settings Dict: ", settings_dict)
186167
await cl.Message(
187168
author=SYSTEM,
188169
content="LLM settings have been updated. You can continue with your Query!",
189-
# elements=[
190-
# cl.Text(
191-
# name="settings",
192-
# display="side",
193-
# content=json.dumps(settings_dict, indent=4),
194-
# language="json",
195-
# ),
196-
# ],
197170
).send()
198171

199172
async def set_starters(self):
@@ -243,8 +216,6 @@ async def start(self):
243216
and display and load previous conversation if chat logging is enabled.
244217
"""
245218

246-
start_time = time.time()
247-
248219
await self.make_llm_settings_widgets(self.config) # Reload the settings widgets
249220

250221
# TODO: remove self.user with cl.user_session.get("user")
@@ -263,8 +234,6 @@ async def start(self):
263234
cl.user_session.set("llm_tutor", self.llm_tutor)
264235
cl.user_session.set("chain", self.chain)
265236

266-
print("Time taken to start LLM: ", time.time() - start_time)
267-
268237
async def stream_response(self, response):
269238
"""
270239
Stream the response from the LLM.
@@ -295,8 +264,6 @@ async def main(self, message):
295264
message: The incoming chat message.
296265
"""
297266

298-
start_time = time.time()
299-
300267
chain = cl.user_session.get("chain")
301268
token_count = 0 # initialize token count
302269
if not chain:
@@ -342,12 +309,9 @@ async def main(self, message):
342309
)
343310
answer_with_sources = answer_with_sources.replace("$$", "$")
344311

345-
print("Time taken to process the message: ", time.time() - start_time)
346-
347312
actions = []
348313

349314
if self.config["llm_params"]["generate_follow_up"]:
350-
start_time = time.time()
351315
cb_follow_up = cl.AsyncLangchainCallbackHandler()
352316
config = {
353317
"callbacks": (
@@ -377,9 +341,6 @@ async def main(self, message):
377341
)
378342
)
379343

380-
print("Time taken to generate questions: ", time.time() - start_time)
381-
print("Total Tokens Used: ", token_count)
382-
383344
await cl.Message(
384345
content=answer_with_sources,
385346
elements=source_elements,

modules/chat/helpers.py

Lines changed: 46 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -137,31 +137,59 @@ def get_history_chat_resume(steps, k, SYSTEM, LLM):
137137

138138
def get_history_setup_llm(memory_list):
139139
conversation_list = []
140-
for message in memory_list:
141-
message_dict = message.to_dict() if hasattr(message, "to_dict") else message
142-
143-
# Check if the type attribute is present as a key or attribute
144-
message_type = (
145-
message_dict.get("type", None)
146-
if isinstance(message_dict, dict)
147-
else getattr(message, "type", None)
140+
i = 0
141+
while i < len(memory_list) - 1:
142+
# Process the current and next message
143+
current_message = memory_list[i]
144+
next_message = memory_list[i + 1]
145+
146+
# Convert messages to dictionary if necessary
147+
current_message_dict = (
148+
current_message.to_dict()
149+
if hasattr(current_message, "to_dict")
150+
else current_message
151+
)
152+
next_message_dict = (
153+
next_message.to_dict() if hasattr(next_message, "to_dict") else next_message
148154
)
149155

150-
# Check if content is present as a key or attribute
151-
message_content = (
152-
message_dict.get("content", None)
153-
if isinstance(message_dict, dict)
154-
else getattr(message, "content", None)
156+
# Check message type and content for current and next message
157+
current_message_type = (
158+
current_message_dict.get("type", None)
159+
if isinstance(current_message_dict, dict)
160+
else getattr(current_message, "type", None)
161+
)
162+
current_message_content = (
163+
current_message_dict.get("content", None)
164+
if isinstance(current_message_dict, dict)
165+
else getattr(current_message, "content", None)
155166
)
156167

157-
if message_type in ["ai", "ai_message"]:
158-
conversation_list.append({"type": "ai_message", "content": message_content})
159-
elif message_type in ["human", "user_message"]:
168+
next_message_type = (
169+
next_message_dict.get("type", None)
170+
if isinstance(next_message_dict, dict)
171+
else getattr(next_message, "type", None)
172+
)
173+
next_message_content = (
174+
next_message_dict.get("content", None)
175+
if isinstance(next_message_dict, dict)
176+
else getattr(next_message, "content", None)
177+
)
178+
179+
# Check if the current message is user message and the next one is AI message
180+
if current_message_type in ["human", "user_message"] and next_message_type in [
181+
"ai",
182+
"ai_message",
183+
]:
184+
conversation_list.append(
185+
{"type": "user_message", "content": current_message_content}
186+
)
160187
conversation_list.append(
161-
{"type": "user_message", "content": message_content}
188+
{"type": "ai_message", "content": next_message_content}
162189
)
190+
i += 2 # Skip the next message since it has been paired
163191
else:
164-
raise ValueError("Invalid message type")
192+
i += 1 # Move to the next message if not a valid pair (e.g., a user message followed by a cooldown system message)
165193

166194
return conversation_list
167195

0 commit comments

Comments
 (0)