1
1
import * as dotenv from 'dotenv'
2
2
import OpenAI from 'openai'
3
3
import { HttpsProxyAgent } from 'https-proxy-agent'
4
+ import { tavily } from '@tavily/core'
5
+ import dayjs from 'dayjs'
4
6
import type { AuditConfig , KeyConfig , UserInfo } from '../storage/model'
5
7
import { Status , UsageResponse } from '../storage/model'
6
8
import { convertImageUrl } from '../utils/image'
@@ -10,7 +12,7 @@ import { getCacheApiKeys, getCacheConfig, getOriginConfig } from '../storage/con
10
12
import { sendResponse } from '../utils'
11
13
import { hasAnyRole , isNotEmptyString } from '../utils/is'
12
14
import type { ModelConfig } from '../types'
13
- import { getChatByMessageId , updateRoomChatModel } from '../storage/mongo'
15
+ import { getChatByMessageId , updateChatSearchQuery , updateChatSearchResult } from '../storage/mongo'
14
16
import type { ChatMessage , RequestOptions } from './types'
15
17
16
18
dotenv . config ( )
@@ -49,17 +51,16 @@ export async function initApi(key: KeyConfig) {
49
51
const processThreads : { userId : string ; abort : AbortController ; messageId : string } [ ] = [ ]
50
52
51
53
async function chatReplyProcess ( options : RequestOptions ) {
54
+ const globalConfig = await getCacheConfig ( )
52
55
const model = options . room . chatModel
56
+ const searchEnabled = options . room . searchEnabled
53
57
const key = await getRandomApiKey ( options . user , model )
54
58
const userId = options . user . _id . toString ( )
55
59
const maxContextCount = options . user . advanced . maxContextCount ?? 20
56
60
const messageId = options . messageId
57
61
if ( key == null || key === undefined )
58
62
throw new Error ( '没有对应的apikeys配置。请再试一次 | No available apikeys configuration. Please try again.' )
59
63
60
- // Add Chat Record
61
- updateRoomChatModel ( userId , options . room . roomId , model )
62
-
63
64
const { message, uploadFileKeys, parentMessageId, process, systemMessage, temperature, top_p } = options
64
65
65
66
try {
@@ -93,6 +94,52 @@ async function chatReplyProcess(options: RequestOptions) {
93
94
content,
94
95
} )
95
96
97
+ const searchConfig = globalConfig . searchConfig
98
+ if ( searchConfig . enabled && searchConfig ?. options ?. apiKey && searchEnabled ) {
99
+ messages [ 0 ] . content = `Before you formally answer the question, you have the option to search the web to get more context from the web.
100
+ Please judge whether you need to search the Internet.
101
+ If you need to search, please return the query word to be submitted to the search engine.
102
+ If you do not need to search, the result will be empty.
103
+ Please do not actually answer the question.
104
+ Just wrap the result in <search_query></search_query> and return it in plain text, such as <search_query>example search query</search_query> or <search_query></search_query>`
105
+ const completion = await openai . chat . completions . create ( {
106
+ model,
107
+ messages,
108
+ } )
109
+ let searchQuery : string = completion . choices [ 0 ] . message . content
110
+ const match = searchQuery . match ( /<search_query>([\s\S]*)<\/search_query>/i )
111
+ if ( match )
112
+ searchQuery = match [ 1 ] . trim ( )
113
+ else
114
+ searchQuery = ''
115
+
116
+ if ( searchQuery ) {
117
+ await updateChatSearchQuery ( messageId , searchQuery )
118
+
119
+ const tvly = tavily ( { apiKey : searchConfig . options ?. apiKey } )
120
+ const response = await tvly . search (
121
+ searchQuery ,
122
+ {
123
+ includeRawContent : true ,
124
+ timeout : 300 ,
125
+ } ,
126
+ )
127
+
128
+ const searchResult = JSON . stringify ( response )
129
+ await updateChatSearchResult ( messageId , searchResult )
130
+
131
+ messages . push ( {
132
+ role : 'user' ,
133
+ content : `Additional information from web search engine.
134
+ search query: <search_query>${ searchQuery } </search_query>
135
+ search result: <search_result>${ searchResult } </search_result>
136
+ current time: <date>${ dayjs ( ) . format ( 'YYYY-MM-DD HH:mm:ss' ) } </date>` ,
137
+ } )
138
+ }
139
+ }
140
+
141
+ messages [ 0 ] . content = systemMessage
142
+
96
143
// Create the chat completion with streaming
97
144
const stream = await openai . chat . completions . create ( {
98
145
model,
0 commit comments