Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update langchain4j.version to v1.0.0-beta1 #76

Merged
merged 2 commits into from
Feb 6, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions langchain4j-kotlin/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,12 @@
<artifactId>mockito-junit-jupiter</artifactId>
<scope>test</scope>
</dependency>

<dependency>
<groupId>me.kpavlov.aimocks</groupId>
<artifactId>ai-mocks-openai</artifactId>
<scope>test</scope>
</dependency>
</dependencies>

</project>
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
package me.kpavlov.langchain4j.kotlin

import me.kpavlov.aimocks.openai.MockOpenai

object TestEnvironment : me.kpavlov.finchly.BaseTestEnvironment(
dotEnvFileDir = "../",
) {
val openaiApiKey = TestEnvironment.get("OPENAI_API_KEY", "demo")
val openaiApiKey = get("OPENAI_API_KEY", "demo")
val mockOpenAi = MockOpenai()
}
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import assertk.assertions.hasMessage
import dev.langchain4j.data.message.AiMessage
import dev.langchain4j.data.message.UserMessage.userMessage
import dev.langchain4j.model.chat.StreamingChatLanguageModel
import dev.langchain4j.model.chat.request.ChatRequest
import dev.langchain4j.model.chat.response.ChatResponse
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler
import kotlinx.coroutines.flow.toList
Expand Down Expand Up @@ -40,7 +41,7 @@ internal class StreamingChatLanguageModelExtensionsKtTest {
handler.onPartialResponse(partialToken1)
handler.onPartialResponse(partialToken2)
handler.onCompleteResponse(completeResponse)
}.whenever(mockModel).chat(any(), any())
}.whenever(mockModel).chat(any<ChatRequest>(), any<StreamingChatResponseHandler>())

val flow =
mockModel.chatFlow {
Expand All @@ -58,7 +59,7 @@ internal class StreamingChatLanguageModelExtensionsKtTest {
)

// Verify interactions
verify(mockModel).chat(any(), any())
verify(mockModel).chat(any<ChatRequest>(), any<StreamingChatResponseHandler>())
}

@Test
Expand All @@ -70,7 +71,7 @@ internal class StreamingChatLanguageModelExtensionsKtTest {
doAnswer {
val handler = it.arguments[1] as StreamingChatResponseHandler
handler.onError(error)
}.whenever(mockModel).chat(any(), any())
}.whenever(mockModel).chat(any<ChatRequest>(), any<StreamingChatResponseHandler>())

val flow =
mockModel.chatFlow {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,33 +8,31 @@ import dev.langchain4j.data.message.SystemMessage.systemMessage
import dev.langchain4j.data.message.UserMessage.userMessage
import dev.langchain4j.model.chat.StreamingChatLanguageModel
import dev.langchain4j.model.chat.response.ChatResponse
import dev.langchain4j.model.openai.OpenAiStreamingChatModel
import kotlinx.coroutines.delay
import kotlinx.coroutines.flow.flow
import kotlinx.coroutines.test.runTest
import kotlinx.coroutines.yield
import me.kpavlov.langchain4j.kotlin.TestEnvironment
import me.kpavlov.langchain4j.kotlin.TestEnvironment.mockOpenAi
import me.kpavlov.langchain4j.kotlin.loadDocument
import me.kpavlov.langchain4j.kotlin.model.chat.StreamingChatLanguageModelReply.CompleteResponse
import me.kpavlov.langchain4j.kotlin.model.chat.StreamingChatLanguageModelReply.PartialResponse
import org.junit.jupiter.api.AfterEach
import org.junit.jupiter.api.Assertions.fail
import org.junit.jupiter.api.Test
import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable
import org.slf4j.LoggerFactory
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicReference

@EnabledIfEnvironmentVariable(
named = "OPENAI_API_KEY",
matches = ".+",
)
class StreamingChatLanguageModelIT {
internal class StreamingChatLanguageModelIT {
private val logger = LoggerFactory.getLogger(javaClass)

private val model: StreamingChatLanguageModel =
OpenAiStreamingChatModel
.builder()
.apiKey(TestEnvironment.openaiApiKey)
.modelName("gpt-4o-mini")
.temperature(0.0)
.maxTokens(100)
.build()
private val model: StreamingChatLanguageModel = createOpenAiStreamingModel()

@AfterEach
fun afterEach() {
mockOpenAi.verifyNoUnmatchedRequests()
}

@Test
fun `StreamingChatLanguageModel should generateFlow`() =
Expand All @@ -54,9 +52,15 @@ class StreamingChatLanguageModelIT {
""".trimIndent(),
)

setupMockResponseIfNecessary(
systemMessage.text(),
"What does Blumblefang love",
"Blumblefang loves to help and cookies",
)

val responseRef = AtomicReference<ChatResponse?>()

val collectedTokens = mutableListOf<String>()
val collectedTokens = ConcurrentLinkedQueue<String>()

model
.chatFlow {
Expand All @@ -66,10 +70,11 @@ class StreamingChatLanguageModelIT {
when (it) {
is PartialResponse -> {
println("Token: '${it.token}'")
collectedTokens.add(it.token)
collectedTokens += it.token
}

is CompleteResponse -> responseRef.set(it.response)
is StreamingChatLanguageModelReply.Error -> fail("Error", it.cause)
else -> fail("Unsupported event: $it")
}
}
Expand All @@ -82,4 +87,31 @@ class StreamingChatLanguageModelIT {
assertThat(collectedTokens.joinToString("")).isEqualTo(textContent)
assertThat(textContent).contains("Blumblefang loves to help")
}

/**
 * Stubs a streaming completion on the mock OpenAI server, unless a real API key is configured.
 *
 * When the `OPENAI_API_KEY` environment variable is set the test talks to the live API and
 * no stubbing is performed. Otherwise the mock server is primed to match a request whose body
 * contains both expected messages, and to stream [expectedAnswer] back token by token.
 *
 * @param expectedSystemMessage substring the request body must contain (system prompt)
 * @param expectedUserMessage substring the request body must contain (user prompt)
 * @param expectedAnswer text to stream back, split on spaces into individual tokens
 */
fun setupMockResponseIfNecessary(
    expectedSystemMessage: String,
    expectedUserMessage: String,
    expectedAnswer: String,
) {
    if (TestEnvironment["OPENAI_API_KEY"] != null) {
        // Fix: these are informational status messages, not errors — log at INFO, not ERROR.
        logger.info("Running with real OpenAI API")
        return
    }
    logger.info("Running with Mock OpenAI API (Ai-Mocks/Mokksy)")

    mockOpenAi.completion {
        requestBodyContains(expectedSystemMessage)
        requestBodyContains(expectedUserMessage)
    } respondsStream {
        responseFlow =
            flow {
                expectedAnswer.split(" ").forEach { token ->
                    // Trailing space restores the separators lost by split(" ").
                    emit("$token ")
                    yield()
                    // Small artificial delay to simulate real streaming latency.
                    delay(42)
                }
            }
        sendDone = true
    }
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
package me.kpavlov.langchain4j.kotlin.model.chat

import dev.langchain4j.model.chat.StreamingChatLanguageModel
import dev.langchain4j.model.openai.OpenAiStreamingChatModel
import dev.langchain4j.model.openai.OpenAiStreamingChatModel.OpenAiStreamingChatModelBuilder
import me.kpavlov.langchain4j.kotlin.TestEnvironment

/**
 * Creates a [StreamingChatLanguageModel] backed by OpenAI for use in tests.
 *
 * If the `OPENAI_API_KEY` environment variable is present, the real OpenAI endpoint is used
 * with that key. Otherwise the model is pointed at the local mock OpenAI server (Ai-Mocks)
 * with a placeholder key.
 *
 * @param configurer optional hook to customize the builder before the model is built
 */
internal fun createOpenAiStreamingModel(
    configurer: OpenAiStreamingChatModelBuilder.() -> Unit = {},
): StreamingChatLanguageModel {
    val builder =
        OpenAiStreamingChatModel
            .builder()
            .modelName("gpt-4o-mini")
            .temperature(0.1)
            .maxTokens(100)

    val apiKey = TestEnvironment["OPENAI_API_KEY"]
    if (apiKey == null) {
        // No real key: route all traffic to the local mock server.
        builder
            .apiKey("my-key")
            .baseUrl("http://localhost:${TestEnvironment.mockOpenAi.port()}/v1")
    } else {
        builder.apiKey(apiKey)
    }

    // Apply caller-supplied customizations last so they win over the defaults above.
    builder.configurer()

    return builder.build()
}
24 changes: 18 additions & 6 deletions pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
</scm>

<properties>
<argLine />
<argLine/>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<kotlin.code.style>official</kotlin.code.style>
<java.version>17</java.version>
Expand All @@ -53,11 +53,12 @@
<maven.compiler.release>${java.version}</maven.compiler.release>
<maven.compiler.source>${java.version}</maven.compiler.source>
<!-- Dependencies -->
<ai-mocks.version>0.1.1</ai-mocks.version>
<awaitility.version>4.2.2</awaitility.version>
<finchly.version>0.1.1</finchly.version>
<junit.version>5.11.4</junit.version>
<kotlinx.version>1.10.1</kotlinx.version>
<langchain4j.version>1.0.0-alpha1</langchain4j.version>
<langchain4j.version>1.0.0-beta1</langchain4j.version>
<mockito-kotlin.version>5.4.0</mockito-kotlin.version>
<mockito.version>5.15.2</mockito.version>
<slf4j.version>2.0.16</slf4j.version>
Expand Down Expand Up @@ -111,6 +112,13 @@
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>me.kpavlov.aimocks</groupId>
<artifactId>bom</artifactId>
<version>${ai-mocks.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.mockito.kotlin</groupId>
<artifactId>mockito-kotlin</artifactId>
Expand All @@ -135,6 +143,7 @@
<version>${finchly.version}</version>
<scope>test</scope>
</dependency>

</dependencies>
</dependencyManagement>

Expand All @@ -148,7 +157,6 @@
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility-kotlin</artifactId>
<version>${awaitility.version}</version>
<scope>test</scope>
</dependency>
<dependency>
Expand Down Expand Up @@ -198,6 +206,10 @@
<groupId>org.jetbrains.dokka</groupId>
<artifactId>dokka-maven-plugin</artifactId>
<version>2.0.0</version>
<configuration>
<languageVersion>1.9</languageVersion>
<detail>false</detail>
</configuration>
</plugin>
<plugin>
<groupId>com.github.ozsie</groupId>
Expand Down Expand Up @@ -239,17 +251,17 @@
<!-- optional: limit format enforcement to just the files changed by this feature branch -->
<ratchetFrom>origin/main</ratchetFrom>
<java>
<palantirJavaFormat />
<palantirJavaFormat/>
</java>
<kotlin>
<ktlint />
<ktlint/>
</kotlin>
<markdown>
<includes>
<!-- You have to set the target manually -->
<include>**/*.md</include>
</includes>
<flexmark />
<flexmark/>
</markdown>
<pom>
<includes>
Expand Down
2 changes: 1 addition & 1 deletion samples/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
<finchly.version>0.1.1</finchly.version>
<kotlinx.version>1.9.0</kotlinx.version>
<langchain4j-kotlin.version>0.1.7</langchain4j-kotlin.version>
<langchain4j.version>1.0.0-alpha1</langchain4j.version>
<langchain4j.version>1.0.0-beta1</langchain4j.version>
<slf4j.version>2.0.16</slf4j.version>
</properties>

Expand Down
Loading