Skip to content

Commit c63d5cb

Browse files
committed
Add additional guidance for custom news
1 parent 5b3a528 commit c63d5cb

File tree

1 file changed

+24
-17
lines changed

1 file changed

+24
-17
lines changed

agents/news.py

+24-17
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,8 @@ def error(text):
5959
"baseprompt": "You are {agentname}, a highly intelligent assistant. The current date is {date}.\n\nYou should give concise responses to very simple questions, but provide thorough responses to more complex and open-ended questions.",
6060
"weather": "You are a weather forecaster. Keep your answers brief and accurate. Current date is {date} and weather conditions:\n[DATA]{context_str}[/DATA]\nProvide a weather update, current weather alerts, conditions, precipitation and forecast for {location} and answer this: {prompt}.",
6161
"stock": "You are a stock analyst. Keep your answers brief and accurate. Current date is {date}.",
62-
"news": "You are a newscaster who specializes in providing headline news. Use only the following context provided by Google News to summarize the top 10 headlines for today. Rank headlines by most important to least important but do not explain why. Always include the news organization and ID. List no more than 10 and do not add a preamble or any commentary.\nAlways use this format:\n#. [News Item] - [News Source] - ID: [ID]\nHere are some examples but never use these: \n1. The World is Round - Science - ID: 11\n2. The Election is over and Children have won - US News - ID: 22\n3. Storms Hit the Southern Coast - ABC - ID: 55\n. Context: {context_str}\nTop 10 Headlines with Source and ID:",
62+
"news": "You are a newscaster who specializes in providing headline news. Use only the following context provided by Google News to summarize the top 10 headlines for today. Rank headlines by most important to least important but do not explain why and reduce duplicates. Always include the news organization and ID. List no more than 10 and do not add a preamble or any commentary.\nAlways use this format:\n#. [News Item] - [News Source] - ID: [ID]\nHere are some examples but never use these: \n1. The World is Round - Science - ID: 11\n2. The Election is over and Children have won - US News - ID: 22\n3. Storms Hit the Southern Coast - ABC - ID: 55\n. Context: {context_str}\nTop 10 Headlines with Source and ID:",
63+
"news_custom": "You are a newscaster who specializes in providing headline news. Use only the following context provided by Google News to summarize the top 10 headlines for today. Rank headlines by most important to least important but do not explain why and reduce duplicates. Always include the news organization and ID. List no more than 10 and do not add a preamble or any commentary.\nAlways use this format:\n#. [News Item] - [News Source] - ID: [ID]\nHere are some examples but never use these: \n1. {topic} is Round - Science - ID: 11\n2. The Election is over and {topic} won - US News - ID: 22\n3. Storms Hit the Southern Coast - ABC - ID: 55\n. Context: {context_str}\nTop 10 Headlines for {topic} with Source and ID:",
6364
"clarify": "You are a highly intelligent assistant. Keep your answers brief and accurate. {format}.",
6465
"location": "What location is specified in this prompt, state None if there isn't one. Use a single word answer. [BEGIN] {prompt} [END]",
6566
"company": "What company is related to the stock price in this prompt? Please state none if there isn't one. Use a single word answer: [BEGIN] {prompt} [END]",
@@ -136,7 +137,7 @@ def base_prompt(content=None):
136137
context = base_prompt()
137138

138139
# Function - Send single prompt to LLM for response
139-
def ask(prompt):
140+
def ask(prompt, temperature=TEMPERATURE):
140141
context = base_prompt()
141142
try:
142143
context.append({"role": "user", "content": prompt})
@@ -146,7 +147,7 @@ def ask(prompt):
146147
model=mymodel,
147148
max_tokens=MAXTOKENS,
148149
stream=False, # Wait for completion
149-
temperature=TEMPERATURE,
150+
temperature=temperature,
150151
messages=context,
151152
)
152153
except openai.OpenAIError as err:
@@ -161,7 +162,7 @@ def ask(prompt):
161162
log(f"ask -> {response.choices[0].message.content.strip()}")
162163
return response.choices[0].message.content.strip()
163164

164-
def ask_llm(query, format=""):
165+
def ask_llm(query, format="", temperature=TEMPERATURE):
165166
# Ask LLM a question
166167
if format == "":
167168
format = f"Respond in {format}."
@@ -173,7 +174,7 @@ def ask_llm(query, format=""):
173174
model=mymodel,
174175
max_tokens=MAXTOKENS,
175176
stream=False,
176-
temperature=TEMPERATURE,
177+
temperature=temperature,
177178
messages=content,
178179
)
179180
log(f"ask_llm -> {response.choices[0].message.content.strip()}")
@@ -284,13 +285,17 @@ def get_news(topic, max=10):
284285
break
285286
return articles
286287

287-
def fetch_news(topic, retries=3):
288+
289+
def fetch_news(topic, retries=3, check=False):
288290
if retries == 0:
289291
return "Unable to fetch news", "Unable to fetch news"
290292
log("Get News")
291-
context_str = get_news(topic, 25)
292-
log(f"News Raw Context = {context_str}")
293-
prompt = expand_prompt(prompts["news"], {"context_str": context_str})
293+
raw_news = get_news(topic, 25)
294+
log(f"News Raw Context for topic {topic} = {raw_news}\n\n")
295+
if topic:
296+
prompt = expand_prompt(prompts["news_custom"], {"context_str": raw_news, "topic": topic})
297+
else:
298+
prompt = expand_prompt(prompts["news"], {"context_str": raw_news})
294299
answer = ask(prompt)
295300
# Replace IDs in answer with URLs
296301
result = ""
@@ -307,20 +312,22 @@ def fetch_news(topic, retries=3):
307312
# Ensure we have a valid UUID that is an integer
308313
if not uuid.isdigit():
309314
result += line
310-
continue
315+
continue
311316
url = news_cache.get(int(uuid))
312317
result += f"{title} <a href=\"{url}\">[Link]</a>"
313318
else:
314319
result += line
315320
else:
316321
result += line
317322
result += "\n"
318-
# Query the LLM to see if all_lines are duplicated
319-
prompt = expand_prompt(prompts["rag"], {"context_str": "\n".join(all_lines), "prompt": "Are these news items all about the same thing?"})
320-
answer = ask(prompt)
321-
if "yes" in answer.lower():
322-
log("News items are all about the same thing")
323-
return fetch_news(topic, retries-1)
323+
if check:
324+
# Query the LLM to see if all_lines are duplicated
325+
prompt = expand_prompt(prompts["rag"], {"context_str": "\n".join(all_lines), "prompt": "Do these look like the same headline?"})
326+
answer = ask(prompt)
327+
if "yes" in answer.lower():
328+
log("News items are not about {topic}")
329+
log(f"\n\nresponse={answer}\n\n{all_lines}")
330+
return fetch_news(topic, retries-1)
324331
return result, text_only
325332

326333
def handle_weather_command(p):
@@ -385,7 +392,7 @@ def buffer(s):
385392
# Fetch News Payloads
386393
news, news_text = fetch_news("")
387394
company_news, company_text = fetch_news(COMPANY)
388-
science_news, science_text = fetch_news("Science")
395+
science_news, science_text = fetch_news("Science",check=True)
389396

390397
# Personalized News Summary
391398
buddy_request = f"{ABOUT_ME} Provide a paragraph summary of the news that should be most interesting to me. Say it as a concerned friend and are giving me a short update for my day."

0 commit comments

Comments
 (0)