
Commit 47489f3

update default model to gpt-4o + fix edit message error on streaming completion
1 parent 95e6de2 commit 47489f3

File tree

4 files changed: +24 -22 lines changed

src/modules/llms/api/athropic.ts
src/modules/llms/api/openai.ts
src/modules/llms/api/vertex.ts
src/modules/llms/openaiBot.ts

src/modules/llms/api/athropic.ts

Lines changed: 3 additions & 1 deletion

@@ -85,6 +85,7 @@ export const anthropicStreamCompletion = async (
   let completion = ''
   let outputTokens = ''
   let inputTokens = ''
+  let message = ''
   for await (const chunk of completionStream) {
     const msg = chunk.toString()
     if (msg) {
@@ -115,7 +116,8 @@ export const anthropicStreamCompletion = async (
       completion = completion.replaceAll('...', '')
       completion += '...'
       wordCount = 0
-      if (ctx.chat?.id) {
+      if (ctx.chat?.id && message !== completion) {
+        message = completion
         await ctx.api
           .editMessageText(ctx.chat?.id, msgId, completion)
           .catch(async (e: any) => {
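
For context on the change above: the Telegram Bot API rejects an editMessageText call whose text matches what the message already contains (400 "Bad Request: message is not modified"), and grammY surfaces that as a GrammyError. A streaming loop can easily produce the same partial text twice, so remembering the last text that was actually pushed and skipping identical edits avoids the error. A minimal standalone sketch of the same guard (sendPartial and lastSent are illustrative names, not part of this repo):

import { Api, GrammyError } from 'grammy'

// Illustrative helper: edit a streaming "draft" message only when the
// accumulated text actually changed since the previous edit.
async function sendPartial (
  api: Api,
  chatId: number,
  msgId: number,
  completion: string,
  lastSent: { text: string }
): Promise<void> {
  if (completion === lastSent.text) {
    return // an identical edit would fail with "message is not modified"
  }
  lastSent.text = completion
  await api.editMessageText(chatId, msgId, completion).catch((e: unknown) => {
    if (e instanceof GrammyError && e.error_code === 400) {
      return // same policy as the diff: tolerate 400s, rethrow anything else
    }
    throw e
  })
}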

src/modules/llms/api/openai.ts

Lines changed: 15 additions & 11 deletions

@@ -131,6 +131,7 @@ export const streamChatCompletion = async (
     throw new Error('Context chat id should not be empty after openAI streaming')
   }
   // let wordCountMinimumCounter = 1;
+  let message = ''
   for await (const part of stream) {
     wordCount++
     const chunck = part.choices[0]?.delta?.content
@@ -147,19 +148,22 @@ export const streamChatCompletion = async (
       completion = completion.replaceAll('...', '')
       completion += '...'
       wordCount = 0
-      await ctx.api
-        .editMessageText(ctx.chat?.id, msgId, completion)
-        .catch(async (e: any) => {
-          if (e instanceof GrammyError) {
-            if (e.error_code !== 400) {
-              throw e
+      if (message !== completion) {
+        message = completion
+        await ctx.api
+          .editMessageText(ctx.chat?.id, msgId, completion)
+          .catch(async (e: any) => {
+            if (e instanceof GrammyError) {
+              if (e.error_code !== 400) {
+                throw e
+              } else {
+                logger.error(e)
+              }
             } else {
-              logger.error(e)
+              throw e
             }
-          } else {
-            throw e
-          }
-        })
+          })
+      }
     }
   }
   completion = completion.replaceAll('...', '')
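
The larger restructure above wraps the whole edit, including its catch, in the message !== completion guard, so redundant edits are skipped entirely while non-400 errors still propagate. The failure mode being avoided is easy to reproduce; a hypothetical standalone snippet (the token and chat id are placeholders, not values from this commit):

import { Api, GrammyError } from 'grammy'

const api = new Api('123456:PLACEHOLDER_TOKEN') // hypothetical token

async function demo (chatId: number): Promise<void> {
  const sent = await api.sendMessage(chatId, 'draft...')
  await api.editMessageText(chatId, sent.message_id, 'updated...')
  try {
    // A second edit with byte-identical text is rejected by Telegram:
    await api.editMessageText(chatId, sent.message_id, 'updated...')
  } catch (e) {
    if (e instanceof GrammyError) {
      console.error(e.error_code, e.description) // 400 Bad Request: message is not modified
    }
  }
}

void demo(123456) // hypothetical chat id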

src/modules/llms/api/vertex.ts

Lines changed: 4 additions & 2 deletions

@@ -85,6 +85,7 @@ export const vertexStreamCompletion = async (
   let completion = ''
   let outputTokens = ''
   let inputTokens = ''
+  let message = ''
   for await (const chunk of completionStream) {
     const msg = chunk.toString()
     if (msg) {
@@ -97,15 +98,16 @@ export const vertexStreamCompletion = async (
       }
       completion = completion.replaceAll('...', '')
       completion += '...'
-      if (ctx.chat?.id) {
+      if (ctx.chat?.id && message !== completion) {
+        message = completion
         await ctx.api
           .editMessageText(ctx.chat?.id, msgId, completion)
           .catch(async (e: any) => {
             if (e instanceof GrammyError) {
               if (e.error_code !== 400) {
                 throw e
               } else {
-                logger.error(e)
+                logger.error(e.message)
               }
             } else {
               throw e

src/modules/llms/openaiBot.ts

Lines changed: 2 additions & 8 deletions

@@ -124,21 +124,15 @@ export class OpenAIBot extends LlmsBase {
         SupportedCommands.chat,
         SupportedCommands.ask,
         SupportedCommands.gpt4,
-        SupportedCommands.gpt
+        SupportedCommands.gpt,
+        SupportedCommands.gpto
       ]) ||
       hasChatPrefix(ctx.message?.text ?? '') ||
       isMentioned(ctx) ||
       ((ctx.message?.text?.startsWith('chat ') ??
         ctx.message?.text?.startsWith('ask ')) &&
         ctx.chat?.type === 'private')
     ) {
-      this.updateSessionModel(ctx, LlmsModelsEnum.GPT_4)
-      await this.onChat(ctx, LlmsModelsEnum.GPT_4, true, false)
-      return
-    }
-
-    if (
-      ctx.hasCommand([SupportedCommands.gpto])) {
       this.updateSessionModel(ctx, LlmsModelsEnum.GPT_4O)
       await this.onChat(ctx, LlmsModelsEnum.GPT_4O, true, false)
       return