add llms/completion call and stream normalization for vertex and claude models
fegloff committed May 28, 2024
1 parent e0d9657 commit b7c39da
Showing 2 changed files with 13 additions and 22 deletions.
31 changes: 11 additions & 20 deletions src/modules/llms/api/athropic.ts
@@ -70,11 +70,11 @@ export const anthropicStreamCompletion = async (
     stream: true,
     system: config.openAi.chatGpt.chatCompletionContext,
     max_tokens: limitTokens ? +config.openAi.chatGpt.maxTokens : undefined,
-    messages: conversation.filter(c => c.model === model).map(m => { return { content: m.content, role: m.role } })
+    messages: conversation.filter(c => c.model === model) // .map(m => { return { content: m.content, role: m.role } })
   }
   let wordCount = 0
   let wordCountMinimum = 2
-  const url = `${API_ENDPOINT}/anthropic/completions`
+  const url = `${API_ENDPOINT}/llms/completions` // `${API_ENDPOINT}/anthropic/completions`
   if (!ctx.chat?.id) {
     throw new Error('Context chat id should not be empty after openAI streaming')
   }
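
This hunk points the Anthropic client at the shared /llms/completions route and stops reshaping messages client-side: the filtered conversation entries, model field included, now travel to the backend unchanged. A minimal sketch of the resulting request body, assuming a ChatConversation shape with content, role, and model fields (illustrative names, not the repository's actual type definitions):

// Sketch only: ChatConversation and the payload shape are inferred from
// the diff, not taken from the repository's actual types.
interface ChatConversation {
  content: string
  role: string
  model: string
}

const buildLlmsPayload = (
  conversation: ChatConversation[],
  model: string,
  system: string,
  maxTokens?: number
): Record<string, unknown> => ({
  model,
  stream: true,
  system,
  max_tokens: maxTokens,
  // Entries keep their model field; the /llms/completions backend is now
  // responsible for normalizing them per provider.
  messages: conversation.filter(c => c.model === model)
})

With the provider-specific mapping gone, the same payload builder can serve both the Claude and Vertex paths, which is presumably why the per-provider routes are commented out rather than deleted.
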
@@ -90,26 +90,17 @@ export const anthropicStreamCompletion = async (
       const msg = chunk.toString()
       if (msg) {
         if (msg.includes('Input Token:')) {
-          const regex = /Input Token: (\d+)(.*)/
-          // Execute the regular expression
-          const match = regex.exec(msg)
-          if (match) {
-            inputTokens = match[1].trim() // Extract the integer part
-            if (match.length >= 3) {
-              completion += match[2]
-            }
-          }
-        } else if (msg.startsWith('Output Tokens')) {
-          outputTokens = msg.split('Output Tokens: ')[1].trim()
+          const tokenMsg = msg.split('Input Token: ')[1]
+          inputTokens = tokenMsg.split('Output Tokens: ')[0]
+          outputTokens = tokenMsg.split('Output Tokens: ')[1]
+          completion = completion.split('Input Token: ')[0]
+        } else if (msg.includes('Output Tokens: ')) {
+          outputTokens = msg.split('Output Tokens: ')[1]
+          completion = completion.split('Output Tokens: ')[0]
         } else {
           wordCount++
           completion += msg
-          if (msg.includes('Output Tokens:')) {
-            outputTokens = msg.split('Output Tokens: ')[1].trim()
-            // outputTokens = tokenMsg.split('Output Tokens: ')[1].trim()
-            completion = completion.split('Output Tokens: ')[0]
-          }
-          if (wordCount > wordCountMinimum) { // if (chunck === '.' && wordCount > wordCountMinimum) {
+          if (wordCount > wordCountMinimum) {
             if (wordCountMinimum < 64) {
               wordCountMinimum *= 2
             }
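
The rewritten branches treat usage counters as plain-text markers embedded in the stream: the text after 'Input Token: ' up to 'Output Tokens: ' is the input count, the text after 'Output Tokens: ' is the output count, and any marker text that leaked into the accumulated completion is trimmed back off. A self-contained sketch of that split logic (function name and return shape are illustrative; the 'Input Token: <n>Output Tokens: <m>' chunk format is inferred from the split calls above):

// Marker-splitting logic mirroring the diff; names are illustrative.
interface UsageParse {
  inputTokens?: string
  outputTokens?: string
  completion: string
}

const parseStreamChunk = (msg: string, completion: string): UsageParse => {
  if (msg.includes('Input Token:')) {
    const tokenMsg = msg.split('Input Token: ')[1]
    return {
      inputTokens: tokenMsg.split('Output Tokens: ')[0],
      outputTokens: tokenMsg.split('Output Tokens: ')[1],
      // Drop any marker text that was appended to the completion earlier.
      completion: completion.split('Input Token: ')[0]
    }
  }
  if (msg.includes('Output Tokens: ')) {
    return {
      outputTokens: msg.split('Output Tokens: ')[1],
      completion: completion.split('Output Tokens: ')[0]
    }
  }
  // Ordinary content chunk: append it.
  return { completion: completion + msg }
}

The surviving wordCount > wordCountMinimum check, with wordCountMinimum doubling until it reaches 64, batches the intermediate message edits so updates get progressively less frequent as the reply grows.
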
@@ -125,7 +116,7 @@ export const anthropicStreamCompletion = async (
       if (e.error_code !== 400) {
         throw e
       } else {
-        logger.error(e)
+        logger.error(e.message)
       }
     } else {
       throw e
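
The last hunk in this file narrows the 400-status log from the whole error object to just its message. A sketch of the handler pattern, with an assumed AnthropicError shape carrying the error_code field the diff tests against:

// AnthropicError is an assumption for illustration; only the error_code
// check and the logger.error(e.message) call come from the diff.
class AnthropicError extends Error {
  constructor (message: string, readonly error_code: number) {
    super(message)
  }
}

const logger = { error: (msg: string): void => { console.error(msg) } }

const handleStreamError = (e: unknown): void => {
  if (e instanceof AnthropicError) {
    if (e.error_code !== 400) {
      throw e // Non-400 errors still propagate to the caller.
    } else {
      logger.error(e.message) // Log only the message, not the full object.
    }
  } else {
    throw e
  }
}
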
4 changes: 2 additions & 2 deletions src/modules/llms/api/vertex.ts
@@ -72,9 +72,9 @@ export const vertexStreamCompletion = async (
     system: config.openAi.chatGpt.chatCompletionContext,
     max_tokens: limitTokens ? +config.openAi.chatGpt.maxTokens : undefined,
     messages: conversation.filter(c => c.model === model)
-      .map(m => { return { parts: { text: m.content }, role: m.role !== 'user' ? 'model' : 'user' } })
+      // .map(m => { return { parts: { text: m.content }, role: m.role !== 'user' ? 'model' : 'user' } })
   }
-  const url = `${API_ENDPOINT}/vertex/completions/gemini`
+  const url = `${API_ENDPOINT}/llms/completions` // `${API_ENDPOINT}/vertex/completions/gemini`
   if (!ctx.chat?.id) {
     throw new Error('Context chat id should not be empty after openAI streaming')
   }
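
The Vertex client mirrors the Anthropic change: it now posts to the same /llms/completions route, and the client-side conversion into Gemini's parts/role format is commented out, leaving that normalization to the backend. For reference, a sketch of the mapping the client used to perform (types assumed for illustration):

// The Gemini-style mapping formerly done client-side; ChatConversation
// is an assumed shape, as above.
interface ChatConversation { content: string, role: string, model: string }

interface GeminiMessage {
  parts: { text: string }
  role: 'model' | 'user'
}

const toGeminiMessage = (m: ChatConversation): GeminiMessage => ({
  parts: { text: m.content },
  // Gemini only distinguishes 'user' and 'model' roles.
  role: m.role !== 'user' ? 'model' : 'user'
})
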
