
Commit c686b84

refactor openAI command list + fix onPrefix method that was having issues with new prefix
1 parent 9c43b03 commit c686b84

4 files changed: +54 -66 lines

src/config.ts

Lines changed: 0 additions & 9 deletions
@@ -80,15 +80,6 @@ export default {
         parseInt(process.env.TYPING_STATUS_ENABLED ?? '1')
       ),
       model: process.env.OPENAI_MODEL ?? 'gpt-3.5-turbo',
-      prefixes: {
-        chatPrefix: process.env.ASK_PREFIX
-          ? process.env.ASK_PREFIX.split(',')
-          : ['a.', '.'], // , "?", ">",
-        newPrefix: process.env.NEW_PREFIX
-          ? process.env.NEW_PREFIX.split(',')
-          : ['n.', '..'],
-        llamaPrefix: ['*']
-      },
       minimumBalance: parseInt(process.env.MIN_BALANCE ?? '0')
     }
   },
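Note: the chat/new/llama prefixes are no longer read from the ASK_PREFIX and NEW_PREFIX environment variables; equivalent defaults now live as hardcoded constants in the helper modules shown below. A minimal sketch of the new lookup, assuming the CHAT_GPT_PREFIX_LIST constant exported from src/modules/open-ai/helpers.ts (the import path and wrapper function here are illustrative):

    // Sketch only: prefix matching now reads module constants instead of config/env vars.
    import { CHAT_GPT_PREFIX_LIST } from './modules/open-ai/helpers' // path assumed

    const startsWithChatPrefix = (text: string): boolean =>
      CHAT_GPT_PREFIX_LIST.some((p) => text.toLocaleLowerCase().startsWith(p))

    startsWithChatPrefix('a. what is the gas price?') // true  ('a.' prefix)
    startsWithChatPrefix('hello there')               // false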

src/modules/llms/helpers.ts

Lines changed: 2 additions & 1 deletion
@@ -22,6 +22,7 @@ export const SupportedCommands = {
 }

 export const MAX_TRIES = 3
+const LLAMA_PREFIX_LIST = ['*']

 export const isMentioned = (
   ctx: OnMessageContext | OnCallBackQueryData
@@ -40,7 +41,7 @@ export const isMentioned = (
 }

 export const hasLlamaPrefix = (prompt: string): string => {
-  const prefixList = config.openAi.chatGpt.prefixes.llamaPrefix
+  const prefixList = LLAMA_PREFIX_LIST
   for (let i = 0; i < prefixList.length; i++) {
     if (prompt.toLocaleLowerCase().startsWith(prefixList[i])) {
       return prefixList[i]
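With the list inlined, hasLlamaPrefix no longer depends on config. Based on this hunk it returns the matched prefix, and presumably an empty string when nothing matches (the fall-through return is not shown here). Usage sketch, with an assumed import path:

    import { hasLlamaPrefix } from './helpers' // path assumed

    hasLlamaPrefix('* summarize this page') // -> '*'
    hasLlamaPrefix('summarize this page')   // -> '' (assumed no-match value)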

src/modules/open-ai/helpers.ts

Lines changed: 22 additions & 21 deletions
@@ -4,30 +4,31 @@ import { type ParseMode } from 'grammy/types'
 import { getChatModel, getChatModelPrice, getTokenNumber } from './api/openAi'
 import { type Message, type InlineKeyboardMarkup } from 'grammy/out/types'
 import { isValidUrl } from './utils/web-crawler'
-// import { llmAddUrlDocument } from '../llms/api/llmApi'

-export const SupportedCommands = {
-  chat: { name: 'chat' },
-  ask: { name: 'ask' },
-  vision: { name: 'vision' },
-  ask35: { name: 'ask35' },
-  new: { name: 'new' },
-  gpt4: { name: 'gpt4' },
-  ask32: { name: 'ask32' },
-  gpt: { name: 'gpt' },
-  last: { name: 'last' },
-  dalle: { name: 'dalle' },
-  dalleImg: { name: 'image' },
-  dalleShort: { name: 'img' },
-  dalleShorter: { name: 'i' },
-  genImgEn: { name: 'genImgEn' },
-  on: { name: 'on' },
-  off: { name: 'off' }
+export enum SupportedCommands {
+  chat = 'chat',
+  ask = 'ask',
+  vision = 'vision',
+  ask35 = 'ask35',
+  new = 'new',
+  gpt4 = 'gpt4',
+  ask32 = 'ask32',
+  gpt = 'gpt',
+  last = 'last',
+  dalle = 'dalle',
+  dalleImg = 'image',
+  dalleShort = 'img',
+  dalleShorter = 'i',
+  genImgEn = 'genImgEn',
+  on = 'on',
+  off = 'off'
 }

 export const MAX_TRIES = 3

-const DALLE_PREFIX_LIST = ['i. ', ',', 'image ', 'd.', 'img ']
+export const DALLE_PREFIX_LIST = ['i. ', ',', 'image ', 'd.', 'img ']
+export const CHAT_GPT_PREFIX_LIST = ['a.', '.']
+export const NEW_PREFIX_LIST = ['n.', '..']

 export const isMentioned = (
   ctx: OnMessageContext | OnCallBackQueryData
@@ -46,7 +47,7 @@ export const isMentioned = (
 }

 export const hasChatPrefix = (prompt: string): string => {
-  const prefixList = config.openAi.chatGpt.prefixes.chatPrefix
+  const prefixList = CHAT_GPT_PREFIX_LIST
   for (let i = 0; i < prefixList.length; i++) {
     if (prompt.toLocaleLowerCase().startsWith(prefixList[i])) {
       return prefixList[i]
@@ -66,7 +67,7 @@ export const hasDallePrefix = (prompt: string): string => {
 }

 export const hasNewPrefix = (prompt: string): string => {
-  const prefixList = config.openAi.chatGpt.prefixes.newPrefix
+  const prefixList = NEW_PREFIX_LIST
   for (let i = 0; i < prefixList.length; i++) {
     if (prompt.toLocaleLowerCase().startsWith(prefixList[i])) {
       return prefixList[i]
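Turning SupportedCommands from an object of { name } records into a string enum means Object.values() already yields the command strings, which is the form grammy's ctx.hasCommand() accepts. A small sketch of the difference (import path assumed):

    import { SupportedCommands } from './helpers' // path assumed

    // Before: Object.values(SupportedCommands).map((command) => command.name)
    // With the string enum, the values are the command names themselves:
    const commandNames: string[] = Object.values(SupportedCommands)
    // -> ['chat', 'ask', 'vision', 'ask35', 'new', 'gpt4', 'ask32', 'gpt', ...]
    // so registration becomes ctx.hasCommand(commandNames) with no extra mapping.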

src/modules/open-ai/index.ts

Lines changed: 30 additions & 35 deletions
@@ -2,7 +2,6 @@ import { GrammyError, InlineKeyboard } from 'grammy'
 import OpenAI from 'openai'
 import { type Logger, pino } from 'pino'

-import { getCommandNamePrompt } from '../1country/utils'
 import { type BotPayments } from '../payment'
 import {
   type ChatConversation,
@@ -76,7 +75,7 @@ export class OpenAIBot implements PayableBot {
     ctx: OnMessageContext | OnCallBackQueryData
   ): boolean {
     const hasCommand = ctx.hasCommand(
-      Object.values(SupportedCommands).map((command) => command.name)
+      Object.values(SupportedCommands).map((command) => command)
     )
     if (isMentioned(ctx)) {
       return true
@@ -104,18 +103,18 @@ export class OpenAIBot implements PayableBot {
       return 0
     }
     if (
-      ctx.hasCommand([SupportedCommands.dalle.name,
-        SupportedCommands.dalleImg.name,
-        SupportedCommands.dalleShort.name,
-        SupportedCommands.dalleShorter.name])
+      ctx.hasCommand([SupportedCommands.dalle,
+        SupportedCommands.dalleImg,
+        SupportedCommands.dalleShort,
+        SupportedCommands.dalleShorter])
     ) {
       const imageNumber = ctx.session.openAi.imageGen.numImages
       const imageSize = ctx.session.openAi.imageGen.imgSize
       const model = getDalleModel(imageSize)
       const price = getDalleModelPrice(model, true, imageNumber) // cents
       return price * priceAdjustment
     }
-    if (ctx.hasCommand(SupportedCommands.genImgEn.name)) {
+    if (ctx.hasCommand(SupportedCommands.genImgEn)) {
       const imageNumber = ctx.session.openAi.imageGen.numImages
       const imageSize = ctx.session.openAi.imageGen.imgSize
       const chatModelName = ctx.session.openAi.chatGpt.model
@@ -147,7 +146,7 @@ export class OpenAIBot implements PayableBot {
       const prompt = ctx.message?.caption ?? ctx.message?.text
       if (prompt && !isNaN(+prompt)) { // && !isNaN(+prompt)
         return true
-      } else if (prompt && (ctx.chat?.type === 'private' || ctx.hasCommand(SupportedCommands.vision.name))) {
+      } else if (prompt && (ctx.chat?.type === 'private' || ctx.hasCommand(SupportedCommands.vision))) {
         return true
       }
     }
@@ -182,7 +181,7 @@ export class OpenAIBot implements PayableBot {
     }

     if (
-      ctx.hasCommand(SupportedCommands.chat.name) ||
+      ctx.hasCommand(SupportedCommands.chat) ||
       (ctx.message?.text?.startsWith('chat ') && ctx.chat?.type === 'private')
     ) {
       ctx.session.openAi.chatGpt.model = ChatGPTModelsEnum.GPT_4
@@ -191,49 +190,48 @@ export class OpenAIBot implements PayableBot {
     }

     if (
-      ctx.hasCommand(SupportedCommands.new.name) ||
+      ctx.hasCommand(SupportedCommands.new) ||
       (ctx.message?.text?.startsWith('new ') && ctx.chat?.type === 'private')
     ) {
-      ctx.session.openAi.chatGpt.model = ChatGPTModelsEnum.GPT_4
       await this.onEnd(ctx)
       await this.onChat(ctx)
       return
     }

     if (
-      ctx.hasCommand(SupportedCommands.ask.name) ||
+      ctx.hasCommand(SupportedCommands.ask) ||
       (ctx.message?.text?.startsWith('ask ') && ctx.chat?.type === 'private')
     ) {
       ctx.session.openAi.chatGpt.model = ChatGPTModelsEnum.GPT_4
       await this.onChat(ctx)
       return
     }

-    if (ctx.hasCommand(SupportedCommands.ask35.name)) {
+    if (ctx.hasCommand(SupportedCommands.ask35)) {
      ctx.session.openAi.chatGpt.model = ChatGPTModelsEnum.GPT_35_TURBO_16K
      await this.onChat(ctx)
      return
    }

-    if (ctx.hasCommand(SupportedCommands.gpt4.name)) {
+    if (ctx.hasCommand(SupportedCommands.gpt4)) {
      ctx.session.openAi.chatGpt.model = ChatGPTModelsEnum.GPT_4
      await this.onChat(ctx)
      return
    }

-    if (ctx.hasCommand(SupportedCommands.gpt.name)) {
+    if (ctx.hasCommand(SupportedCommands.gpt)) {
      ctx.session.openAi.chatGpt.model = ChatGPTModelsEnum.GPT_4
      await this.onChat(ctx)
      return
    }

-    if (ctx.hasCommand(SupportedCommands.ask32.name)) {
+    if (ctx.hasCommand(SupportedCommands.ask32)) {
      ctx.session.openAi.chatGpt.model = ChatGPTModelsEnum.GPT_4_32K
      await this.onChat(ctx)
      return
    }

-    if (ctx.hasCommand(SupportedCommands.vision.name)) {
+    if (ctx.hasCommand(SupportedCommands.vision)) {
      const photoUrl = getUrlFromText(ctx)
      if (photoUrl) {
        const prompt = ctx.match
@@ -252,10 +250,10 @@ export class OpenAIBot implements PayableBot {
     }

     if (
-      ctx.hasCommand([SupportedCommands.dalle.name,
-        SupportedCommands.dalleImg.name,
-        SupportedCommands.dalleShort.name,
-        SupportedCommands.dalleShorter.name]) ||
+      ctx.hasCommand([SupportedCommands.dalle,
+        SupportedCommands.dalleImg,
+        SupportedCommands.dalleShort,
+        SupportedCommands.dalleShorter]) ||
       (ctx.message?.text?.startsWith('image ') && ctx.chat?.type === 'private')
     ) {
       let prompt = (ctx.match ? ctx.match : ctx.message?.text) as string
@@ -280,16 +278,16 @@ export class OpenAIBot implements PayableBot {
       return
     }

-    if (ctx.hasCommand(SupportedCommands.last.name)) {
+    if (ctx.hasCommand(SupportedCommands.last)) {
       await this.onLast(ctx)
       return
     }

     const text = ctx.message?.text ?? ''
-
-    if (hasNewPrefix(text) !== '') {
+    const newPrefix = hasNewPrefix(text)
+    if (newPrefix !== '') {
       await this.onEnd(ctx)
-      await this.onPrefix(ctx)
+      await this.onPrefix(ctx, newPrefix)
       return
     }

@@ -311,9 +309,9 @@ export class OpenAIBot implements PayableBot {
       }
       return
     }
-
-    if (hasChatPrefix(text) !== '') {
-      await this.onPrefix(ctx)
+    const prefix = hasChatPrefix(text)
+    if (prefix !== '') {
+      await this.onPrefix(ctx, prefix)
       return
     }

@@ -426,7 +424,7 @@ export class OpenAIBot implements PayableBot {
     }
   }

-  async onPrefix (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+  async onPrefix (ctx: OnMessageContext | OnCallBackQueryData, prefix: string): Promise<void> {
     try {
       if (this.botSuspended) {
         ctx.transient.analytics.sessionState = RequestState.Error
@@ -436,13 +434,9 @@ export class OpenAIBot implements PayableBot {
         ctx.transient.analytics.actualResponseTime = now()
         return
       }
-      const { prompt } = getCommandNamePrompt(
-        ctx,
-        SupportedCommands
-      )
-      const prefix = hasPrefix(prompt)
+      const prompt = ctx.message?.text?.slice(prefix.length) ?? ''
       ctx.session.openAi.chatGpt.requestQueue.push(
-        await preparePrompt(ctx, prompt.slice(prefix.length))
+        await preparePrompt(ctx, prompt)
       )
       if (!ctx.session.openAi.chatGpt.isProcessingQueue) {
         ctx.session.openAi.chatGpt.isProcessingQueue = true
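This is the onPrefix fix from the commit message: instead of re-deriving the prefix inside the method via getCommandNamePrompt/hasPrefix, the message handler now passes the matched prefix in, and the prompt is obtained by slicing it off the raw message text. Note the handler checks hasNewPrefix before hasChatPrefix (see the hunks above), so '..' is not swallowed by the plain '.' chat prefix. A sketch of the fixed flow with an illustrative message (import path assumed):

    import { hasNewPrefix } from './helpers' // path assumed

    const text = '.. who won the 2018 World Cup?'
    const newPrefix = hasNewPrefix(text)        // -> '..' (matched against NEW_PREFIX_LIST)
    // handler: await this.onEnd(ctx); await this.onPrefix(ctx, newPrefix)
    // inside onPrefix:
    const prompt = text.slice(newPrefix.length) // -> ' who won the 2018 World Cup?'
    // preparePrompt(ctx, prompt) presumably trims/normalizes before queueing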
@@ -750,6 +744,7 @@ export class OpenAIBot implements PayableBot {
   }

   async onEnd (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+    ctx.session.openAi.chatGpt.model = ChatGPTModelsEnum.GPT_4
     ctx.session.openAi.chatGpt.chatConversation = []
     ctx.session.openAi.chatGpt.usage = 0
     ctx.session.openAi.chatGpt.price = 0
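Moving the model reset into onEnd means every path that ends a conversation, including the /new command and the new-prefix path above, also restores the default model, so the /new handler no longer needs to set it separately. In summary:

    // After this change onEnd resets, in one place:
    //   ctx.session.openAi.chatGpt.model            -> ChatGPTModelsEnum.GPT_4
    //   ctx.session.openAi.chatGpt.chatConversation -> []
    //   ctx.session.openAi.chatGpt.usage            -> 0
    //   ctx.session.openAi.chatGpt.price            -> 0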
